/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the queue.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone has put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		KASSERT(asoc->cnt_on_reasm_queue == 0, ("cnt_on_reasm_queue is %u", asoc->cnt_on_reasm_queue));
		KASSERT(asoc->cnt_on_all_streams == 0, ("cnt_on_all_streams is %u", asoc->cnt_on_all_streams));
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
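/*
 * A rough worked example of the calculation above (the numbers are
 * illustrative only): with SCTP_SB_LIMIT_RCV() returning 65536, four
 * 1200-byte fragments held on the reassembly queue, and assuming MSIZE is
 * 256, the advertised window becomes 65536 - (4800 + 4 * 256) minus the
 * control overhead in my_rwnd_control_len. The MSIZE term charges each
 * held chunk for its mbuf overhead, not just its payload bytes.
 */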
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}
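/*
 * Note on the (flags << 8) above: sinfo_flags keeps the wire-level chunk
 * flags in its upper byte, so e.g. an unordered chunk is later detected
 * with ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED), as done
 * throughout this file.
 */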
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
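/*
 * The resulting mbuf thus carries up to three back-to-back cmsgs, each
 * padded out to CMSG_SPACE(): an SCTP_RCVINFO, an SCTP_NXTINFO (only when
 * the next message is already known), and either an SCTP_SNDRCV or
 * SCTP_EXTRCV block, in that order.
 */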
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
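/*
 * In effect this moves one TSN's bit from mapping_array (still revokable,
 * i.e. eligible for renege when sctp_do_drain is enabled) to
 * nr_mapping_array (non-revokable): once data has been handed to the
 * socket buffer it can no longer be dropped to reclaim memory, so it must
 * not be reported as revokable in any future SACK.
 */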
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one stream can be here in old style
				 * -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the new one,
				 * insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The peer sent a duplicate msg id; return
				 * -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
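/*
 * The stream queues are kept sorted by MID (the 32-bit message id for
 * I-DATA, the 16-bit SSN for old DATA); SCTP_MID_GT()/SCTP_MID_EQ() are
 * expected to compare in serial-number space at the appropriate width, so
 * the sort order survives sequence-number wrap.
 */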
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}
static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}
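/*
 * Callers reach this when sctp_place_control_in_stream() rejects an entry
 * (duplicate MID): every chunk still parked on the control's reassembly
 * list is freed along with the control itself, before the association is
 * aborted.
 */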
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it can be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
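/*
 * Delivery example: with last_mid_delivered == 4, an arriving complete
 * message with MID 5 goes straight to the read queue, and the loop above
 * then drains MIDs 6, 7, ... from the in-stream queue until it hits a gap
 * or a still-fragmented entry (which instead sets *need_reasm).
 */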
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
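/*
 * Keeping tail_mbuf up to date is what lets sctp_add_to_tail_pointer()
 * below append a new fragment in O(1) instead of walking the whole chain
 * each time; zero-length mbufs are freed here so the chain never carries
 * empty links into the socket-buffer accounting.
 */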
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->mid = control->mid;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}
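/*
 * The clone takes its own reference on whoFrom, mirroring
 * sctp_build_readq_entry() above, so the remote-address structure stays
 * valid for as long as either readq entry is outstanding.
 */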
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the peer is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
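/*
 * pd_point is the partial-delivery threshold computed by the caller
 * (sctp_deliver_reasm_check below): once a still-incomplete message grows
 * beyond it, the data is pushed to the reader via the PD-API rather than
 * held until the last fragment arrives, bounding how much one unfinished
 * message can pin in the reassembly queue.
 */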
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN i.e. FSN
			 * wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
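/*
 * Why the duplicate-FSN case above is a protocol violation: with old DATA
 * chunks the FSN is simply the TSN, and duplicate TSNs are filtered out in
 * sctp_process_a_data_chunk() before reassembly, so a duplicate arriving
 * here means the peer's numbering is inconsistent.
 */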
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing, if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return;
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return;
	}
	if (control == NULL) {
		return;
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * The guy at the top was being partially delivered and has
		 * completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return;
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until its
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			} else {
				/* We are now doing PD API */
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
			}
		}
	}
out:
	return;
}
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * Its being pd-api'd so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
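/*
 * The returned byte count lets the caller keep size_on_all_streams
 * consistent: the bytes leave the reassembly-queue accounting here and are
 * re-charged to the stream accounting in sctp_queue_data_for_reasm() below.
 */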
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (unordered == 0) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_clean_up_control(stcb, control);
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/*
			 * Ok we created this control and now let's validate
			 * that it's legal, i.e. there is a B bit set; if
			 * not and we have received up to the cum-ack then
			 * it's invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reassembly portion: o if it's
	 * the first it goes to the control mbuf. o if it's not first but the
	 * next in sequence it goes to the control, and each succeeding one
	 * in order also goes. o if it's not in order we place it on the list
	 * in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so it's a dup
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so it's a dup
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * validate not beyond top FSN if we have seen the
			 * last one
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * The peer sent a duplicate str seq number.
				 * I guess I will just free this new guy;
				 * should we abort too? FIX ME MAYBE? Or it
				 * COULD be that the SSN's have wrapped.
				 * Maybe I should compare to TSN somehow...
				 * sigh, for now just blow away the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control structure that
	 * are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				asoc->size_on_all_streams += lenadded;
				if (control->on_read_q) {
					do_wakeup = 1;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
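/*
 * E.g. if fragments with FSNs 5, 6 and 7 are parked and fsn_included is 4,
 * the collapse loop above merges all three into the control in one pass,
 * so the reassembly list only ever holds out-of-order fragments.
 */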
static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
				break;
			}
		}
	} else {
		if (idata_supported) {
			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
					break;
				}
			}
		} else {
			control = TAILQ_FIRST(&strm->uno_inqueue);
		}
	}
	return (control);
}
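/*
 * For old-style unordered DATA the MID carries no information (everything
 * reassembles on one control), which is why the non-I-DATA branch simply
 * returns the head of uno_inqueue instead of searching by MID.
 */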
1653 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1654 struct mbuf **m, int offset, int chk_length,
1655 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1656 int *break_flag, int last_chunk, uint8_t chk_type)
1658 /* Process a data chunk */
1659 /* struct sctp_tmit_chunk *chk; */
1660 struct sctp_tmit_chunk *chk;
1661 uint32_t tsn, fsn, gap, mid;
1664 int need_reasm_check = 0;
1666 struct mbuf *op_err;
1667 char msg[SCTP_DIAG_INFO_LEN];
1668 struct sctp_queued_to_read *control, *ncontrol;
1671 struct sctp_stream_reset_list *liste;
1674 int created_control = 0;
1676 if (chk_type == SCTP_IDATA) {
1677 struct sctp_idata_chunk *chunk, chunk_buf;
1679 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1680 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1681 chk_flags = chunk->ch.chunk_flags;
1682 clen = sizeof(struct sctp_idata_chunk);
1683 tsn = ntohl(chunk->dp.tsn);
1684 sid = ntohs(chunk->dp.sid);
1685 mid = ntohl(chunk->dp.mid);
1686 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1688 ppid = chunk->dp.ppid_fsn.ppid;
1690 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1691 ppid = 0xffffffff; /* Use as an invalid value. */
1694 struct sctp_data_chunk *chunk, chunk_buf;
1696 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1697 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1698 chk_flags = chunk->ch.chunk_flags;
1699 clen = sizeof(struct sctp_data_chunk);
1700 tsn = ntohl(chunk->dp.tsn);
1701 sid = ntohs(chunk->dp.sid);
1702 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1704 ppid = chunk->dp.ppid;
1706 if ((size_t)chk_length == clen) {
1708 * Need to send an abort since we had a empty data chunk.
1710 op_err = sctp_generate_no_user_data_cause(tsn);
1711 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1712 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1716 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1717 asoc->send_sack = 1;
1719 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1720 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1721 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1726 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1727 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1728 /* It is a duplicate */
1729 SCTP_STAT_INCR(sctps_recvdupdata);
1730 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1731 /* Record a dup for the next outbound sack */
1732 asoc->dup_tsns[asoc->numduptsns] = tsn;
1735 asoc->send_sack = 1;
1738 /* Calculate the number of TSN's between the base and this TSN */
1739 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1740 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1741 /* Can't hold the bit in the mapping at max array, toss it */
1744 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1745 SCTP_TCB_LOCK_ASSERT(stcb);
1746 if (sctp_expand_mapping_array(asoc, gap)) {
1747 /* Can't expand, drop it */
1751 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1754 /* See if we have received this one already */
1755 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1756 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1757 SCTP_STAT_INCR(sctps_recvdupdata);
1758 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1759 /* Record a dup for the next outbound sack */
1760 asoc->dup_tsns[asoc->numduptsns] = tsn;
1763 asoc->send_sack = 1;
1767 * Check to see about the GONE flag, duplicates would cause a sack
1768 * to be sent up above
1770 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1771 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1772 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1774 * wait a minute, this guy is gone, there is no longer a
1775 * receiver. Send peer an ABORT!
1777 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1778 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1783 * Now before going further we see if there is room. If NOT then we
1784 * MAY let one through only IF this TSN is the one we are waiting
1785 * for on a partial delivery API.
1788 /* Is the stream valid? */
1789 if (sid >= asoc->streamincnt) {
1790 struct sctp_error_invalid_stream *cause;
1792 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1793 0, M_NOWAIT, 1, MT_DATA);
1794 if (op_err != NULL) {
1795 /* add some space up front so prepend will work well */
1796 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1797 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1799 * Error causes are just param's and this one has
1800 * two back to back phdr, one with the error type
1801 * and size, the other with the streamid and a rsvd
1803 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1804 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1805 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1806 cause->stream_id = htons(sid);
1807 cause->reserved = htons(0);
1808 sctp_queue_op_err(stcb, op_err);
1810 SCTP_STAT_INCR(sctps_badsid);
1811 SCTP_TCB_LOCK_ASSERT(stcb);
1812 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1813 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1814 asoc->highest_tsn_inside_nr_map = tsn;
1816 if (tsn == (asoc->cumulative_tsn + 1)) {
1817 /* Update cum-ack */
1818 asoc->cumulative_tsn = tsn;
1823 * If its a fragmented message, lets see if we can find the control
1824 * on the reassembly queues.
1826 if ((chk_type == SCTP_IDATA) &&
1827 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1830 * The first *must* be fsn 0, and other (middle/end) pieces
1831 * can *not* be fsn 0. XXX: This can happen in case of a
1832 * wrap around. Ignore is for now.
1834 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1838 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1839 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1840 chk_flags, control);
1841 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1842 /* See if we can find the re-assembly entity */
1843 if (control != NULL) {
1844 /* We found something, does it belong? */
1845 if (ordered && (mid != control->mid)) {
1846 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1848 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1849 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1850 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1854 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1856 * We can't have a switched order with an
1859 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1863 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1865 * We can't have a switched unordered with a
1868 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1875 * Its a complete segment. Lets validate we don't have a
1876 * re-assembly going on with the same Stream/Seq (for
1877 * ordered) or in the same Stream for unordered.
1879 if (control != NULL) {
1880 if (ordered || asoc->idata_supported) {
1881 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1883 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1886 if ((tsn == control->fsn_included + 1) &&
1887 (control->end_added == 0)) {
1888 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1896 /* now do the tests */
1897 if (((asoc->cnt_on_all_streams +
1898 asoc->cnt_on_reasm_queue +
1899 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1900 (((int)asoc->my_rwnd) <= 0)) {
1902 * When we have NO room in the rwnd we check to make sure
1903 * the reader is doing its job...
1905 if (stcb->sctp_socket->so_rcv.sb_cc) {
1906 /* some to read, wake-up */
1907 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1910 so = SCTP_INP_SO(stcb->sctp_ep);
1911 atomic_add_int(&stcb->asoc.refcnt, 1);
1912 SCTP_TCB_UNLOCK(stcb);
1913 SCTP_SOCKET_LOCK(so, 1);
1914 SCTP_TCB_LOCK(stcb);
1915 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1916 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1917 /* assoc was freed while we were unlocked */
1918 SCTP_SOCKET_UNLOCK(so, 1);
1922 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1923 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1924 SCTP_SOCKET_UNLOCK(so, 1);
1927 /* now is it in the mapping array of what we have accepted? */
1928 if (chk_type == SCTP_DATA) {
1929 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1930 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1931 /* Nope not in the valid range dump it */
1933 sctp_set_rwnd(stcb, asoc);
1934 if ((asoc->cnt_on_all_streams +
1935 asoc->cnt_on_reasm_queue +
1936 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1937 SCTP_STAT_INCR(sctps_datadropchklmt);
1939 SCTP_STAT_INCR(sctps_datadroprwnd);
1945 if (control == NULL) {
1948 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1953 #ifdef SCTP_ASOCLOG_OF_TSNS
1954 SCTP_TCB_LOCK_ASSERT(stcb);
1955 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1956 asoc->tsn_in_at = 0;
1957 asoc->tsn_in_wrapped = 1;
1959 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1960 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1961 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1962 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1963 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1964 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1965 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1966 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1970 * Before we continue lets validate that we are not being fooled by
1971 * an evil attacker. We can only have Nk chunks based on our TSN
1972 * spread allowed by the mapping array N * 8 bits, so there is no
1973 * way our stream sequence numbers could have wrapped. We of course
1974 * only validate the FIRST fragment so the bit must be set.
1976 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1977 (TAILQ_EMPTY(&asoc->resetHead)) &&
1978 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1979 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1980 /* The incoming sseq is behind where we last delivered? */
1981 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1982 mid, asoc->strmin[sid].last_mid_delivered);
1984 if (asoc->idata_supported) {
1985 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1986 asoc->strmin[sid].last_mid_delivered,
1991 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1992 (uint16_t)asoc->strmin[sid].last_mid_delivered,
1997 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1998 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1999 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2003 if (chk_type == SCTP_IDATA) {
2004 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2006 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2008 if (last_chunk == 0) {
2009 if (chk_type == SCTP_IDATA) {
2010 dmbuf = SCTP_M_COPYM(*m,
2011 (offset + sizeof(struct sctp_idata_chunk)),
2014 dmbuf = SCTP_M_COPYM(*m,
2015 (offset + sizeof(struct sctp_data_chunk)),
2018 #ifdef SCTP_MBUF_LOGGING
2019 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2020 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2024 /* We can steal the last chunk */
2028 /* lop off the top part */
2029 if (chk_type == SCTP_IDATA) {
2030 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2032 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2034 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2035 l_len = SCTP_BUF_LEN(dmbuf);
2038 * need to count up the size hopefully does not hit
2044 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2045 l_len += SCTP_BUF_LEN(lat);
2048 if (l_len > the_len) {
2049 /* Trim the excess bytes off the end too */
2050 m_adj(dmbuf, -(l_len - the_len));
2053 if (dmbuf == NULL) {
2054 SCTP_STAT_INCR(sctps_nomem);
2058 * Now no matter what, we need a control, get one if we don't have
2059 * one (we may have gotten it above when we found the message was
2062 if (control == NULL) {
2063 sctp_alloc_a_readq(stcb, control);
2064 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2069 if (control == NULL) {
2070 SCTP_STAT_INCR(sctps_nomem);
2073 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2076 control->data = dmbuf;
2078 for (mm = control->data; mm; mm = mm->m_next) {
2079 control->length += SCTP_BUF_LEN(mm);
2081 control->tail_mbuf = NULL;
2082 control->end_added = 1;
2083 control->last_frag_seen = 1;
2084 control->first_frag_seen = 1;
2085 control->fsn_included = fsn;
2086 control->top_fsn = fsn;
2088 created_control = 1;
2090 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2091 chk_flags, ordered, mid, control);
2092 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2093 TAILQ_EMPTY(&asoc->resetHead) &&
2095 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2096 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2097 /* Candidate for express delivery */
2099 * It's not fragmented, no PD-API is up, nothing is in the
2100 * delivery queue, it's un-ordered OR ordered and the next to
2101 * deliver AND nothing else is stuck on the stream queue,
2102 * and there is room for it in the socket buffer. Let's just
2103 * stuff it up the buffer....
2105 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2106 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2107 asoc->highest_tsn_inside_nr_map = tsn;
2109 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2112 sctp_add_to_readq(stcb->sctp_ep, stcb,
2113 control, &stcb->sctp_socket->so_rcv,
2114 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2116 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2117 /* for ordered, bump what we delivered */
2118 asoc->strmin[sid].last_mid_delivered++;
2120 SCTP_STAT_INCR(sctps_recvexpress);
2121 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2122 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2123 SCTP_STR_LOG_FROM_EXPRS_DEL);
2126 goto finish_express_del;
2128 /* Now will we need a chunk too? */
2129 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2130 sctp_alloc_a_chunk(stcb, chk);
2132 /* No memory so we drop the chunk */
2133 SCTP_STAT_INCR(sctps_nomem);
2134 if (last_chunk == 0) {
2135 /* we copied it, free the copy */
2136 sctp_m_freem(dmbuf);
2140 chk->rec.data.tsn = tsn;
2141 chk->no_fr_allowed = 0;
2142 chk->rec.data.fsn = fsn;
2143 chk->rec.data.mid = mid;
2144 chk->rec.data.sid = sid;
2145 chk->rec.data.ppid = ppid;
2146 chk->rec.data.context = stcb->asoc.context;
2147 chk->rec.data.doing_fast_retransmit = 0;
2148 chk->rec.data.rcv_flags = chk_flags;
2150 chk->send_size = the_len;
2152 SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (MID: %u)\n",
2155 atomic_add_int(&net->ref_count, 1);
2158 /* Set the appropriate TSN mark */
2159 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2160 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2161 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2162 asoc->highest_tsn_inside_nr_map = tsn;
2165 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2166 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2167 asoc->highest_tsn_inside_map = tsn;
2170 /* Now is it complete (i.e. not fragmented)? */
2171 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2173 * Special check for when streams are resetting. We could be
2174 * smarter about this and check the actual stream to see
2175 * if it is not being reset.. that way we would not create a
2176 * HOLB when amongst streams being reset and those not being
2180 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2181 SCTP_TSN_GT(tsn, liste->tsn)) {
2183 * yep, it's past where we need to reset... go ahead
2186 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2188 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2190 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2191 unsigned char inserted = 0;
2193 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2194 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2199 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2204 if (inserted == 0) {
2206 * must be put at end, use prevP
2207 * (all setup from loop) to setup
2210 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2213 goto finish_express_del;
2215 if (chk_flags & SCTP_DATA_UNORDERED) {
2216 /* queue directly into socket buffer */
2217 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2219 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2220 sctp_add_to_readq(stcb->sctp_ep, stcb,
2222 &stcb->sctp_socket->so_rcv, 1,
2223 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2226 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2228 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2236 goto finish_express_del;
2238 /* If we reach here it's a reassembly */
2239 need_reasm_check = 1;
2240 SCTPDBG(SCTP_DEBUG_XXX,
2241 "Queue data to stream for reasm control: %p MID: %u\n",
2243 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2246 * the assoc is now gone and chk was put onto the reasm
2247 * queue, which has all been freed.
2255 /* Here we tidy up things */
2256 if (tsn == (asoc->cumulative_tsn + 1)) {
2257 /* Update cum-ack */
2258 asoc->cumulative_tsn = tsn;
2264 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2266 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2268 SCTP_STAT_INCR(sctps_recvdata);
2269 /* Set it present please */
2270 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2271 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2273 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2274 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2275 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2277 if (need_reasm_check) {
2278 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2279 need_reasm_check = 0;
2281 /* check the special flag for stream resets */
2282 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2283 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2285 * We have finished working through the backlogged TSNs; now it is
2286 * time to reset streams. 1: call the reset function. 2: free the
2287 * pending_reply space. 3: distribute any chunks in the
2288 * pending_reply_queue.
2290 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2291 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2292 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2293 SCTP_FREE(liste, SCTP_M_STRESET);
2294 /* sa_ignore FREED_MEMORY */
2295 liste = TAILQ_FIRST(&asoc->resetHead);
2296 if (TAILQ_EMPTY(&asoc->resetHead)) {
2297 /* All can be removed */
2298 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2299 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2300 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2304 if (need_reasm_check) {
2305 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2306 need_reasm_check = 0;
2310 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2311 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2315 * If control->sinfo_tsn is <= liste->tsn we
2316 * can process it, which is the NOT of
2317 * control->sinfo_tsn > liste->tsn.
2319 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2320 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2324 if (need_reasm_check) {
2325 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2326 need_reasm_check = 0;
2334 static const int8_t sctp_map_lookup_tab[256] = {
2335 0, 1, 0, 2, 0, 1, 0, 3,
2336 0, 1, 0, 2, 0, 1, 0, 4,
2337 0, 1, 0, 2, 0, 1, 0, 3,
2338 0, 1, 0, 2, 0, 1, 0, 5,
2339 0, 1, 0, 2, 0, 1, 0, 3,
2340 0, 1, 0, 2, 0, 1, 0, 4,
2341 0, 1, 0, 2, 0, 1, 0, 3,
2342 0, 1, 0, 2, 0, 1, 0, 6,
2343 0, 1, 0, 2, 0, 1, 0, 3,
2344 0, 1, 0, 2, 0, 1, 0, 4,
2345 0, 1, 0, 2, 0, 1, 0, 3,
2346 0, 1, 0, 2, 0, 1, 0, 5,
2347 0, 1, 0, 2, 0, 1, 0, 3,
2348 0, 1, 0, 2, 0, 1, 0, 4,
2349 0, 1, 0, 2, 0, 1, 0, 3,
2350 0, 1, 0, 2, 0, 1, 0, 7,
2351 0, 1, 0, 2, 0, 1, 0, 3,
2352 0, 1, 0, 2, 0, 1, 0, 4,
2353 0, 1, 0, 2, 0, 1, 0, 3,
2354 0, 1, 0, 2, 0, 1, 0, 5,
2355 0, 1, 0, 2, 0, 1, 0, 3,
2356 0, 1, 0, 2, 0, 1, 0, 4,
2357 0, 1, 0, 2, 0, 1, 0, 3,
2358 0, 1, 0, 2, 0, 1, 0, 6,
2359 0, 1, 0, 2, 0, 1, 0, 3,
2360 0, 1, 0, 2, 0, 1, 0, 4,
2361 0, 1, 0, 2, 0, 1, 0, 3,
2362 0, 1, 0, 2, 0, 1, 0, 5,
2363 0, 1, 0, 2, 0, 1, 0, 3,
2364 0, 1, 0, 2, 0, 1, 0, 4,
2365 0, 1, 0, 2, 0, 1, 0, 3,
2366 0, 1, 0, 2, 0, 1, 0, 8
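/*
 * Illustrative summary: sctp_map_lookup_tab[val] is the number of
 * consecutive one bits in val counted from the least significant bit,
 * i.e. how many in-sequence TSNs one mapping-array byte contributes
 * before its first gap. For example, tab[0x01] = 1, tab[0x03] = 2,
 * tab[0x07] = 3 and tab[0xff] = 8, while tab[0x02] = 0 because bit 0
 * is clear.
 */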
2371 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2374 * Now we also need to check the mapping array in a couple of ways.
2375 * 1) Did we move the cum-ack point?
2377 * When you first glance at this you might think that all entries
2378 * that make up the position of the cum-ack would be in the
2379 * nr-mapping array only.. i.e. things up to the cum-ack are always
2380 * deliverable. That's true with one exception: when it's a fragmented
2381 * message we may not deliver the data until some threshold (or all
2382 * of it) is in place. So we must OR the nr_mapping_array and
2383 * mapping_array to get a true picture of the cum-ack.
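/*
 * Illustrative example (hypothetical values): if nr_mapping_array[0] is
 * 0x3f and mapping_array[0] is 0x40, then val = 0x3f | 0x40 = 0x7f and
 * sctp_map_lookup_tab[0x7f] = 7, so the seven TSNs starting at
 * mapping_array_base_tsn are all present and the cum-ack may advance
 * across them even though the seventh still waits on the reasm queue.
 */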
2385 struct sctp_association *asoc;
2388 int slide_from, slide_end, lgap, distance;
2389 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2393 old_cumack = asoc->cumulative_tsn;
2394 old_base = asoc->mapping_array_base_tsn;
2395 old_highest = asoc->highest_tsn_inside_map;
2397 * We could probably improve this a small bit by calculating the
2398 * offset of the current cum-ack as the starting point.
2401 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2402 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2406 /* there is a 0 bit */
2407 at += sctp_map_lookup_tab[val];
2411 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2413 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2414 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2416 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2417 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2419 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2420 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2421 sctp_print_mapping_array(asoc);
2422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2423 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2425 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2426 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2429 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2430 highest_tsn = asoc->highest_tsn_inside_nr_map;
2432 highest_tsn = asoc->highest_tsn_inside_map;
2434 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2435 /* The complete array was completed by a single FR */
2436 /* highest becomes the cum-ack */
2442 /* clear the array */
2443 clr = ((at + 7) >> 3);
2444 if (clr > asoc->mapping_array_size) {
2445 clr = asoc->mapping_array_size;
2447 memset(asoc->mapping_array, 0, clr);
2448 memset(asoc->nr_mapping_array, 0, clr);
2450 for (i = 0; i < asoc->mapping_array_size; i++) {
2451 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2452 SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2453 sctp_print_mapping_array(asoc);
2457 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2458 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2459 } else if (at >= 8) {
2460 /* we can slide the mapping array down */
2461 /* slide_from holds where we hit the first NON 0xff byte */
2464 * now calculate the ceiling of the move using our highest
2467 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2468 slide_end = (lgap >> 3);
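/*
 * Worked example with hypothetical numbers: if mapping_array_base_tsn
 * is 1000 and highest_tsn is 1100, SCTP_CALC_TSN_TO_GAP yields
 * lgap = 100, so slide_end = 100 >> 3 = 12: the highest TSN lives in
 * byte 12 of the map, which bounds how far the slide may reach.
 */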
2469 if (slide_end < slide_from) {
2470 sctp_print_mapping_array(asoc);
2472 panic("impossible slide");
2474 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2475 lgap, slide_end, slide_from, at);
2479 if (slide_end > asoc->mapping_array_size) {
2481 panic("would overrun buffer");
2483 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2484 asoc->mapping_array_size, slide_end);
2485 slide_end = asoc->mapping_array_size;
2488 distance = (slide_end - slide_from) + 1;
2489 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2490 sctp_log_map(old_base, old_cumack, old_highest,
2491 SCTP_MAP_PREPARE_SLIDE);
2492 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2493 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2495 if (distance + slide_from > asoc->mapping_array_size ||
2498 * Here we do NOT slide forward the array so that
2499 * hopefully when more data comes in to fill it up
2500 * we will be able to slide it forward. Really I
2501 * don't think this should happen :-0
2504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2505 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2506 (uint32_t)asoc->mapping_array_size,
2507 SCTP_MAP_SLIDE_NONE);
2512 for (ii = 0; ii < distance; ii++) {
2513 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2514 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2517 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2518 asoc->mapping_array[ii] = 0;
2519 asoc->nr_mapping_array[ii] = 0;
2521 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2522 asoc->highest_tsn_inside_map += (slide_from << 3);
2524 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2525 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2527 asoc->mapping_array_base_tsn += (slide_from << 3);
2528 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2529 sctp_log_map(asoc->mapping_array_base_tsn,
2530 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2531 SCTP_MAP_SLIDE_RESULT);
2538 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2540 struct sctp_association *asoc;
2541 uint32_t highest_tsn;
2544 sctp_slide_mapping_arrays(stcb);
2546 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2547 highest_tsn = asoc->highest_tsn_inside_nr_map;
2549 highest_tsn = asoc->highest_tsn_inside_map;
2551 /* Is there a gap now? */
2552 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2555 * Now we need to see if we need to queue a sack or just start the
2556 * timer (if allowed).
2558 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2560 * Ok, special case: in the SHUTDOWN-SENT case, here we make
2561 * sure the SACK timer is off and instead send a SHUTDOWN and a
2564 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2565 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2566 stcb->sctp_ep, stcb, NULL,
2567 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2569 sctp_send_shutdown(stcb,
2570 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2572 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2576 * CMT DAC algorithm: increase number of packets received
2579 stcb->asoc.cmt_dac_pkts_rcvd++;
2581 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2583 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2585 (stcb->asoc.numduptsns) || /* we have dup's */
2586 (is_a_gap) || /* is still a gap */
2587 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2588 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2591 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2592 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2593 (stcb->asoc.send_sack == 0) &&
2594 (stcb->asoc.numduptsns == 0) &&
2595 (stcb->asoc.delayed_ack) &&
2596 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2599 * CMT DAC algorithm: With CMT, delay acks
2600 * even in the face of
2602 * reordering. Therefore, acks that do
2603 * not have to be sent because of the above
2604 * reasons will be delayed. That is, acks
2605 * that would have been sent due to gap
2606 * reports will be delayed with DAC. Start
2607 * the delayed ack timer.
2609 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2610 stcb->sctp_ep, stcb, NULL);
2613 * Ok we must build a SACK since the timer
2614 * is pending, we got our first packet OR
2615 * there are gaps or duplicates.
2617 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2618 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2621 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2622 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2623 stcb->sctp_ep, stcb, NULL);
2630 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2631 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2632 struct sctp_nets *net, uint32_t *high_tsn)
2634 struct sctp_chunkhdr *ch, chunk_buf;
2635 struct sctp_association *asoc;
2636 int num_chunks = 0; /* number of control chunks processed */
2638 int chk_length, break_flag, last_chunk;
2639 int abort_flag = 0, was_a_gap;
2641 uint32_t highest_tsn;
2644 sctp_set_rwnd(stcb, &stcb->asoc);
2647 SCTP_TCB_LOCK_ASSERT(stcb);
2649 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2650 highest_tsn = asoc->highest_tsn_inside_nr_map;
2652 highest_tsn = asoc->highest_tsn_inside_map;
2654 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2656 * setup where we got the last DATA packet from for any SACK that
2657 * may need to go out. Don't bump the net. This is done ONLY when a
2658 * chunk is assigned.
2660 asoc->last_data_chunk_from = net;
2663 * Now before we proceed we must figure out if this is a wasted
2664 * cluster... i.e. it is a small packet sent in and yet the driver
2665 * underneath allocated a full cluster for it. If so we must copy it
2666 * to a smaller mbuf and free up the cluster mbuf. This will help
2667 * with cluster starvation. Note for __Panda__ we don't do this
2668 * since it has clusters all the way down to 64 bytes.
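/*
 * Rough illustration (sizes are common platform defaults, not
 * guarantees): a 100-byte DATA packet parked in a 2048-byte cluster
 * wastes roughly 95% of the cluster, while it fits easily in an
 * ordinary mbuf whose data area (MLEN) is a couple hundred bytes, so
 * the copy below releases the cluster back to the pool.
 */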
2670 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2671 /* we only handle mbufs that are singletons.. not chains */
2672 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2674 /* ok, let's see if we can copy the data up */
2677 /* get the pointers and copy */
2678 to = mtod(m, caddr_t *);
2679 from = mtod((*mm), caddr_t *);
2680 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2681 /* copy the length and free up the old */
2682 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2684 /* success, back copy */
2687 /* We are in trouble in the mbuf world .. yikes */
2691 /* get pointer to the first chunk header */
2692 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2693 sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2698 * process all DATA chunks...
2700 *high_tsn = asoc->cumulative_tsn;
2702 asoc->data_pkts_seen++;
2703 while (stop_proc == 0) {
2704 /* validate chunk length */
2705 chk_length = ntohs(ch->chunk_length);
2706 if (length - *offset < chk_length) {
2707 /* all done, mutilated chunk */
2711 if ((asoc->idata_supported == 1) &&
2712 (ch->chunk_type == SCTP_DATA)) {
2713 struct mbuf *op_err;
2714 char msg[SCTP_DIAG_INFO_LEN];
2716 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2717 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2718 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2719 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2722 if ((asoc->idata_supported == 0) &&
2723 (ch->chunk_type == SCTP_IDATA)) {
2724 struct mbuf *op_err;
2725 char msg[SCTP_DIAG_INFO_LEN];
2727 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2728 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2729 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2730 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2733 if ((ch->chunk_type == SCTP_DATA) ||
2734 (ch->chunk_type == SCTP_IDATA)) {
2737 if (ch->chunk_type == SCTP_DATA) {
2738 clen = sizeof(struct sctp_data_chunk);
2740 clen = sizeof(struct sctp_idata_chunk);
2742 if (chk_length < clen) {
2744 * Need to send an abort since we had an
2745 * invalid data chunk.
2747 struct mbuf *op_err;
2748 char msg[SCTP_DIAG_INFO_LEN];
2750 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2752 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2753 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2754 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2757 #ifdef SCTP_AUDITING_ENABLED
2758 sctp_audit_log(0xB1, 0);
2760 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2765 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2766 chk_length, net, high_tsn, &abort_flag, &break_flag,
2767 last_chunk, ch->chunk_type)) {
2775 * Set because of out of rwnd space and no
2776 * drop rep space left.
2782 /* not a data chunk in the data region */
2783 switch (ch->chunk_type) {
2784 case SCTP_INITIATION:
2785 case SCTP_INITIATION_ACK:
2786 case SCTP_SELECTIVE_ACK:
2787 case SCTP_NR_SELECTIVE_ACK:
2788 case SCTP_HEARTBEAT_REQUEST:
2789 case SCTP_HEARTBEAT_ACK:
2790 case SCTP_ABORT_ASSOCIATION:
2792 case SCTP_SHUTDOWN_ACK:
2793 case SCTP_OPERATION_ERROR:
2794 case SCTP_COOKIE_ECHO:
2795 case SCTP_COOKIE_ACK:
2798 case SCTP_SHUTDOWN_COMPLETE:
2799 case SCTP_AUTHENTICATION:
2800 case SCTP_ASCONF_ACK:
2801 case SCTP_PACKET_DROPPED:
2802 case SCTP_STREAM_RESET:
2803 case SCTP_FORWARD_CUM_TSN:
2807 * Now, what do we do with KNOWN
2808 * chunks that are NOT in the right
2811 * For now, I do nothing but ignore
2812 * them. We may later want to add
2813 * sysctl stuff to switch out and do
2814 * either an ABORT() or possibly
2817 struct mbuf *op_err;
2818 char msg[SCTP_DIAG_INFO_LEN];
2820 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2822 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2823 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2827 /* unknown chunk type, use bit rules */
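/*
 * Per the chunk-type upper-bit rules of RFC 4960, section 3.2: with
 * bit 0x80 clear we stop processing the rest of the packet, with it
 * set we skip this chunk and continue; bit 0x40 additionally asks us
 * to report an "unrecognized chunk" cause, which is what is built
 * below.
 */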
2828 if (ch->chunk_type & 0x40) {
2829 /* Add an error report to the queue */
2830 struct mbuf *op_err;
2831 struct sctp_gen_error_cause *cause;
2833 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2834 0, M_NOWAIT, 1, MT_DATA);
2835 if (op_err != NULL) {
2836 cause = mtod(op_err, struct sctp_gen_error_cause *);
2837 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2838 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2839 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2840 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2841 if (SCTP_BUF_NEXT(op_err) != NULL) {
2842 sctp_queue_op_err(stcb, op_err);
2844 sctp_m_freem(op_err);
2848 if ((ch->chunk_type & 0x80) == 0) {
2849 /* discard the rest of this packet */
2851 } /* else skip this bad chunk and
2854 } /* switch of chunk type */
2856 *offset += SCTP_SIZE32(chk_length);
2857 if ((*offset >= length) || stop_proc) {
2858 /* no more data left in the mbuf chain */
2862 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2863 sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2872 * we need to report rwnd overrun drops.
2874 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2878 * Did we get data? If so, update the time for auto-close and
2879 * give peer credit for being alive.
2881 SCTP_STAT_INCR(sctps_recvpktwithdata);
2882 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2883 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2884 stcb->asoc.overall_error_count,
2886 SCTP_FROM_SCTP_INDATA,
2889 stcb->asoc.overall_error_count = 0;
2890 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2892 /* now service all of the reassm queue if needed */
2893 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2894 /* Assure that we ack right away */
2895 stcb->asoc.send_sack = 1;
2897 /* Start a sack timer or QUEUE a SACK for sending */
2898 sctp_sack_check(stcb, was_a_gap);
2903 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2904 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2906 uint32_t *biggest_newly_acked_tsn,
2907 uint32_t *this_sack_lowest_newack,
2910 struct sctp_tmit_chunk *tp1;
2911 unsigned int theTSN;
2912 int j, wake_him = 0, circled = 0;
2914 /* Recover the tp1 we last saw */
2917 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2919 for (j = frag_strt; j <= frag_end; j++) {
2920 theTSN = j + last_tsn;
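/*
 * Gap ack block offsets are relative to the cumulative TSN. With
 * hypothetical values last_tsn = 1000, frag_strt = 2 and
 * frag_end = 4, this loop walks TSNs 1002, 1003 and 1004.
 */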
2922 if (tp1->rec.data.doing_fast_retransmit)
2926 * CMT: CUCv2 algorithm. For each TSN being
2927 * processed from the sent queue, track the
2928 * next expected pseudo-cumack, or
2929 * rtx_pseudo_cumack, if required. Separate
2930 * cumack trackers for first transmissions,
2931 * and retransmissions.
2933 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2934 (tp1->whoTo->find_pseudo_cumack == 1) &&
2935 (tp1->snd_count == 1)) {
2936 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2937 tp1->whoTo->find_pseudo_cumack = 0;
2939 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2940 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2941 (tp1->snd_count > 1)) {
2942 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2943 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2945 if (tp1->rec.data.tsn == theTSN) {
2946 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2948 * must be held until
2951 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2953 * If it is less than RESEND, it is
2954 * now no longer in flight.
2955 * Higher values may already be set
2956 * via previous Gap Ack Blocks...
2957 * i.e. ACKED or RESEND.
2959 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2960 *biggest_newly_acked_tsn)) {
2961 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
2964 * CMT: SFR algo (and HTNA) - set
2965 * saw_newack to 1 for dest being
2966 * newly acked. update
2967 * this_sack_highest_newack if
2970 if (tp1->rec.data.chunk_was_revoked == 0)
2971 tp1->whoTo->saw_newack = 1;
2973 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2974 tp1->whoTo->this_sack_highest_newack)) {
2975 tp1->whoTo->this_sack_highest_newack =
2979 * CMT DAC algo: also update
2980 * this_sack_lowest_newack
2982 if (*this_sack_lowest_newack == 0) {
2983 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2984 sctp_log_sack(*this_sack_lowest_newack,
2989 SCTP_LOG_TSN_ACKED);
2991 *this_sack_lowest_newack = tp1->rec.data.tsn;
2994 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2995 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2996 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2997 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2998 * Separate pseudo_cumack trackers for first transmissions and
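/*
 * Illustrative reading: a pseudo-cumack is a per-destination "next TSN
 * I expect acked" marker. With hypothetical TSNs 10 and 11 sent to
 * dest A and 12 sent to dest B, a gap ack covering 10 advances A's
 * pseudo-cumack and lets A's cwnd grow even though the
 * association-wide cum-ack may still be stuck below 10.
 */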
3001 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3002 if (tp1->rec.data.chunk_was_revoked == 0) {
3003 tp1->whoTo->new_pseudo_cumack = 1;
3005 tp1->whoTo->find_pseudo_cumack = 1;
3007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3008 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3010 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3011 if (tp1->rec.data.chunk_was_revoked == 0) {
3012 tp1->whoTo->new_pseudo_cumack = 1;
3014 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3016 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3017 sctp_log_sack(*biggest_newly_acked_tsn,
3022 SCTP_LOG_TSN_ACKED);
3024 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3025 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3026 tp1->whoTo->flight_size,
3028 (uint32_t)(uintptr_t)tp1->whoTo,
3031 sctp_flight_size_decrease(tp1);
3032 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3033 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3036 sctp_total_flight_decrease(stcb, tp1);
3038 tp1->whoTo->net_ack += tp1->send_size;
3039 if (tp1->snd_count < 2) {
3041 * True non-retransmitted chunk
3043 tp1->whoTo->net_ack2 += tp1->send_size;
3051 sctp_calculate_rto(stcb,
3054 &tp1->sent_rcv_time,
3055 sctp_align_safe_nocopy,
3056 SCTP_RTT_FROM_DATA);
3059 if (tp1->whoTo->rto_needed == 0) {
3060 tp1->whoTo->rto_needed = 1;
3066 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3067 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3068 stcb->asoc.this_sack_highest_gap)) {
3069 stcb->asoc.this_sack_highest_gap =
3072 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3073 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3074 #ifdef SCTP_AUDITING_ENABLED
3075 sctp_audit_log(0xB2,
3076 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3081 * All chunks NOT UNSENT fall through here and are marked
3082 * (leave PR-SCTP ones that are to skip alone though)
3084 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3085 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3086 tp1->sent = SCTP_DATAGRAM_MARKED;
3088 if (tp1->rec.data.chunk_was_revoked) {
3089 /* deflate the cwnd */
3090 tp1->whoTo->cwnd -= tp1->book_size;
3091 tp1->rec.data.chunk_was_revoked = 0;
3093 /* NR Sack code here */
3095 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3096 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3097 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3100 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3103 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3104 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3105 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3106 stcb->asoc.trigger_reset = 1;
3108 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3114 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3115 sctp_m_freem(tp1->data);
3122 } /* if (tp1->tsn == theTSN) */
3123 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3126 tp1 = TAILQ_NEXT(tp1, sctp_next);
3127 if ((tp1 == NULL) && (circled == 0)) {
3129 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3131 } /* end while (tp1) */
3134 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3136 /* In case the fragments were not in order we must reset */
3137 } /* end for (j = fragStart */
3139 return (wake_him); /* Return value only used for nr-sack */
3144 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3145 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3146 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3147 int num_seg, int num_nr_seg, int *rto_ok)
3149 struct sctp_gap_ack_block *frag, block;
3150 struct sctp_tmit_chunk *tp1;
3155 uint16_t frag_strt, frag_end, prev_frag_end;
3157 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3161 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3164 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3166 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3167 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3168 *offset += sizeof(block);
3170 return (chunk_freed);
3172 frag_strt = ntohs(frag->start);
3173 frag_end = ntohs(frag->end);
3175 if (frag_strt > frag_end) {
3176 /* This gap report is malformed, skip it. */
3179 if (frag_strt <= prev_frag_end) {
3180 /* This gap report is not in order, so restart. */
3181 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3183 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3184 *biggest_tsn_acked = last_tsn + frag_end;
3191 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3192 non_revocable, &num_frs, biggest_newly_acked_tsn,
3193 this_sack_lowest_newack, rto_ok)) {
3196 prev_frag_end = frag_end;
3198 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3200 sctp_log_fr(*biggest_tsn_acked,
3201 *biggest_newly_acked_tsn,
3202 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3204 return (chunk_freed);
3208 sctp_check_for_revoked(struct sctp_tcb *stcb,
3209 struct sctp_association *asoc, uint32_t cumack,
3210 uint32_t biggest_tsn_acked)
3212 struct sctp_tmit_chunk *tp1;
3214 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3215 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3217 * Ok, this guy is either ACKED or MARKED. If it is
3218 * ACKED it has been previously acked but not this
3219 * time, i.e. revoked. If it is MARKED it was ACK'ed
3222 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3225 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3226 /* it has been revoked */
3227 tp1->sent = SCTP_DATAGRAM_SENT;
3228 tp1->rec.data.chunk_was_revoked = 1;
3230 * We must add this stuff back in to assure
3231 * timers and such get started.
3233 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3234 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3235 tp1->whoTo->flight_size,
3237 (uint32_t)(uintptr_t)tp1->whoTo,
3240 sctp_flight_size_increase(tp1);
3241 sctp_total_flight_increase(stcb, tp1);
3243 * We inflate the cwnd to compensate for our
3244 * artificial inflation of the flight_size.
3246 tp1->whoTo->cwnd += tp1->book_size;
3247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3248 sctp_log_sack(asoc->last_acked_seq,
3253 SCTP_LOG_TSN_REVOKED);
3255 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3256 /* it has been re-acked in this SACK */
3257 tp1->sent = SCTP_DATAGRAM_ACKED;
3260 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3267 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3268 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3270 struct sctp_tmit_chunk *tp1;
3271 int strike_flag = 0;
3273 int tot_retrans = 0;
3274 uint32_t sending_seq;
3275 struct sctp_nets *net;
3276 int num_dests_sacked = 0;
3279 * select the sending_seq, this is either the next thing ready to be
3280 * sent but not transmitted, OR, the next seq we assign.
3282 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3284 sending_seq = asoc->sending_seq;
3286 sending_seq = tp1->rec.data.tsn;
3289 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3290 if ((asoc->sctp_cmt_on_off > 0) &&
3291 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3292 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3293 if (net->saw_newack)
3297 if (stcb->asoc.prsctp_supported) {
3298 (void)SCTP_GETTIME_TIMEVAL(&now);
3300 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3302 if (tp1->no_fr_allowed) {
3303 /* this one had a timeout or something */
3306 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3307 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3308 sctp_log_fr(biggest_tsn_newly_acked,
3311 SCTP_FR_LOG_CHECK_STRIKE);
3313 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3314 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3318 if (stcb->asoc.prsctp_supported) {
3319 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3320 /* Is it expired? */
3321 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3322 /* Yes so drop it */
3323 if (tp1->data != NULL) {
3324 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3325 SCTP_SO_NOT_LOCKED);
3331 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3332 /* we are beyond the tsn in the sack */
3335 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3336 /* either a RESEND, ACKED, or MARKED */
3338 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3339 /* Continue striking FWD-TSN chunks */
3340 tp1->rec.data.fwd_tsn_cnt++;
3345 * CMT : SFR algo (covers part of DAC and HTNA as well)
3347 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3349 * No new acks were received for data sent to this
3350 * dest. Therefore, according to the SFR algo for
3351 * CMT, no data sent to this dest can be marked for
3352 * FR using this SACK.
3355 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3356 tp1->whoTo->this_sack_highest_newack)) {
3358 * CMT: New acks were received for data sent to
3359 * this dest. But no new acks were seen for data
3360 * sent after tp1. Therefore, according to the SFR
3361 * algo for CMT, tp1 cannot be marked for FR using
3362 * this SACK. This step covers part of the DAC algo
3363 * and the HTNA algo as well.
3368 * Here we check to see if we have already done a FR
3369 * and if so we see if the biggest TSN we saw in the sack is
3370 * smaller than the recovery point. If so we don't strike
3371 * the tsn... otherwise we CAN strike the TSN.
3374 * @@@ JRI: Check for CMT if (accum_moved &&
3375 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3378 if (accum_moved && asoc->fast_retran_loss_recovery) {
3380 * Strike the TSN if in fast-recovery and cum-ack
3383 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3384 sctp_log_fr(biggest_tsn_newly_acked,
3387 SCTP_FR_LOG_STRIKE_CHUNK);
3389 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3392 if ((asoc->sctp_cmt_on_off > 0) &&
3393 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3395 * CMT DAC algorithm: If SACK flag is set to
3396 * 0, then lowest_newack test will not pass
3397 * because it would have been set to the
3398 * cumack earlier. If it is not already to be
3399 * rtx'd, if this is not a mixed sack, and if tp1 is
3400 * not between two sacked TSNs, then mark it by
3401 * one more. NOTE that we are marking by one
3402 * additional time since the SACK DAC flag
3403 * indicates that two packets have been
3404 * received after this missing TSN.
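/*
 * Illustrative effect (hypothetical numbers, assuming the usual
 * threshold of three missing reports before fast retransmit): a
 * missing TSN that would normally need three SACKs to be marked for
 * resend can get there in two, because a DAC SACK vouches for two
 * packets received beyond the hole and so earns an extra strike.
 */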
3406 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3407 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3408 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3409 sctp_log_fr(16 + num_dests_sacked,
3412 SCTP_FR_LOG_STRIKE_CHUNK);
3417 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3418 (asoc->sctp_cmt_on_off == 0)) {
3420 * For those that have done a FR we must take
3421 * special consideration if we strike. I.e., the
3422 * biggest_newly_acked must be higher than the
3423 * sending_seq at the time we did the FR.
3426 #ifdef SCTP_FR_TO_ALTERNATE
3428 * If FR's go to new networks, then we must only do
3429 * this for singly homed asoc's. However if the FR's
3430 * go to the same network (Armando's work) then it's
3431 * ok to FR multiple times.
3439 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3440 tp1->rec.data.fast_retran_tsn)) {
3442 * Strike the TSN, since this ack is
3443 * beyond where things were when we
3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3447 sctp_log_fr(biggest_tsn_newly_acked,
3450 SCTP_FR_LOG_STRIKE_CHUNK);
3452 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3456 if ((asoc->sctp_cmt_on_off > 0) &&
3457 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3459 * CMT DAC algorithm: If
3460 * SACK flag is set to 0,
3461 * then lowest_newack test
3462 * will not pass because it
3463 * would have been set to
3464 * the cumack earlier. If it is
3465 * not already to be rtx'd,
3466 * if this is not a mixed sack and
3467 * if tp1 is not between two
3468 * sacked TSNs, then mark by
3469 * one more. NOTE that we
3470 * are marking by one
3471 * additional time since the
3472 * SACK DAC flag indicates
3473 * that two packets have
3474 * been received after this
3477 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3478 (num_dests_sacked == 1) &&
3479 SCTP_TSN_GT(this_sack_lowest_newack,
3480 tp1->rec.data.tsn)) {
3481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3482 sctp_log_fr(32 + num_dests_sacked,
3485 SCTP_FR_LOG_STRIKE_CHUNK);
3487 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3495 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3498 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3499 biggest_tsn_newly_acked)) {
3501 * We don't strike these: This is the HTNA
3502 * algorithm, i.e. we don't strike if our TSN is
3503 * larger than the Highest TSN Newly Acked.
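/*
 * Illustrative case (hypothetical TSNs): if the biggest TSN newly
 * acked by this SACK is 1010, then TSN 1012 is left alone; the SACK
 * carries no evidence about anything sent after 1010, so striking
 * 1012 would be guessing.
 */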
3507 /* Strike the TSN */
3508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3509 sctp_log_fr(biggest_tsn_newly_acked,
3512 SCTP_FR_LOG_STRIKE_CHUNK);
3514 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3517 if ((asoc->sctp_cmt_on_off > 0) &&
3518 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3520 * CMT DAC algorithm: If SACK flag is set to
3521 * 0, then lowest_newack test will not pass
3522 * because it would have been set to the
3523 * cumack earlier. If it is not already to be
3524 * rtx'd, if this is not a mixed sack, and if tp1 is
3525 * not between two sacked TSNs, then mark it by
3526 * one more. NOTE that we are marking by one
3527 * additional time since the SACK DAC flag
3528 * indicates that two packets have been
3529 * received after this missing TSN.
3531 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3532 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3533 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3534 sctp_log_fr(48 + num_dests_sacked,
3537 SCTP_FR_LOG_STRIKE_CHUNK);
3543 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3544 struct sctp_nets *alt;
3546 /* fix counts and things */
3547 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3548 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3549 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3551 (uint32_t)(uintptr_t)tp1->whoTo,
3555 tp1->whoTo->net_ack++;
3556 sctp_flight_size_decrease(tp1);
3557 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3558 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3562 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3563 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3564 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3566 /* add back to the rwnd */
3567 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3569 /* remove from the total flight */
3570 sctp_total_flight_decrease(stcb, tp1);
3572 if ((stcb->asoc.prsctp_supported) &&
3573 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3575 * Has it been retransmitted tv_sec times? -
3576 * we store the retran count there.
3578 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3579 /* Yes, so drop it */
3580 if (tp1->data != NULL) {
3581 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3582 SCTP_SO_NOT_LOCKED);
3584 /* Make sure to flag we had a FR */
3585 tp1->whoTo->net_ack++;
3590 * SCTP_PRINTF("OK, we are now ready to FR this
3593 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3594 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3598 /* This is a subsequent FR */
3599 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3601 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3602 if (asoc->sctp_cmt_on_off > 0) {
3604 * CMT: Using RTX_SSTHRESH policy for CMT.
3605 * If CMT is being used, then pick dest with
3606 * largest ssthresh for any retransmission.
3608 tp1->no_fr_allowed = 1;
3610 /* sa_ignore NO_NULL_CHK */
3611 if (asoc->sctp_cmt_pf > 0) {
3613 * JRS 5/18/07 - If CMT PF is on,
3614 * use the PF version of
3617 alt = sctp_find_alternate_net(stcb, alt, 2);
3620 * JRS 5/18/07 - If only CMT is on,
3621 * use the CMT version of
3624 /* sa_ignore NO_NULL_CHK */
3625 alt = sctp_find_alternate_net(stcb, alt, 1);
3631 * CUCv2: If a different dest is picked for
3632 * the retransmission, then new
3633 * (rtx-)pseudo_cumack needs to be tracked
3634 * for orig dest. Let CUCv2 track new (rtx-)
3635 * pseudo-cumack always.
3638 tp1->whoTo->find_pseudo_cumack = 1;
3639 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3641 } else {/* CMT is OFF */
3643 #ifdef SCTP_FR_TO_ALTERNATE
3644 /* Can we find an alternate? */
3645 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3648 * default behavior is to NOT retransmit
3649 * FR's to an alternate. Armando Caro's
3650 * paper details why.
3656 tp1->rec.data.doing_fast_retransmit = 1;
3658 /* mark the sending seq for possible subsequent FR's */
3660 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3661 * (uint32_t)tpi->rec.data.tsn);
3663 if (TAILQ_EMPTY(&asoc->send_queue)) {
3665 * If the send queue is empty then it's
3666 * the next sequence number that will be
3667 * assigned, so we subtract one from this to
3668 * get the one we last sent.
3670 tp1->rec.data.fast_retran_tsn = sending_seq;
3673 * If there are chunks on the send queue
3674 * (unsent data that has made it from the
3675 * stream queues but not out the door), we
3676 * take the first one (which will have the
3677 * lowest TSN) and subtract one to get the
3680 struct sctp_tmit_chunk *ttt;
3682 ttt = TAILQ_FIRST(&asoc->send_queue);
3683 tp1->rec.data.fast_retran_tsn =
3689 * this guy had an RTO calculation pending on
3692 if ((tp1->whoTo != NULL) &&
3693 (tp1->whoTo->rto_needed == 0)) {
3694 tp1->whoTo->rto_needed = 1;
3698 if (alt != tp1->whoTo) {
3699 /* yes, there is an alternate. */
3700 sctp_free_remote_addr(tp1->whoTo);
3701 /* sa_ignore FREED_MEMORY */
3703 atomic_add_int(&alt->ref_count, 1);
3709 struct sctp_tmit_chunk *
3710 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3711 struct sctp_association *asoc)
3713 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3717 if (asoc->prsctp_supported == 0) {
3720 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3721 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3722 tp1->sent != SCTP_DATAGRAM_RESEND &&
3723 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3724 /* no chance to advance, out of here */
3727 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3728 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3729 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3730 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3731 asoc->advanced_peer_ack_point,
3732 tp1->rec.data.tsn, 0, 0);
3735 if (!PR_SCTP_ENABLED(tp1->flags)) {
3737 * We can't fwd-tsn past any that are reliable aka
3738 * retransmitted until the asoc fails.
3743 (void)SCTP_GETTIME_TIMEVAL(&now);
3747 * now we got a chunk which is marked for another
3748 * retransmission to a PR-stream but has run out its chances
3749 * already maybe OR has been marked to skip now. Can we skip
3750 * it if it's a resend?
3752 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3753 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3755 * Now is this one marked for resend and its time is
3758 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3759 /* Yes so drop it */
3761 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3762 1, SCTP_SO_NOT_LOCKED);
3766 * No, we are done when we hit one for resend
3767 * whose time has not expired.
3773 * Ok now if this chunk is marked to drop it we can clean up
3774 * the chunk, advance our peer ack point and we can check
3777 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3778 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3779 /* advance: the PeerAckPoint goes forward */
3780 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3781 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3783 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3784 /* No update but we do save the chk */
3789 * If it is still in RESEND we can advance no
3799 sctp_fs_audit(struct sctp_association *asoc)
3801 struct sctp_tmit_chunk *chk;
3802 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3805 int entry_flight, entry_cnt;
3810 entry_flight = asoc->total_flight;
3811 entry_cnt = asoc->total_flight_count;
3813 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3816 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3817 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3818 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3823 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3825 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3827 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3834 if ((inflight > 0) || (inbetween > 0)) {
3836 panic("Flight size-express incorrect? \n");
3838 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3839 entry_flight, entry_cnt);
3841 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3842 inflight, inbetween, resend, above, acked);
3851 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3852 struct sctp_association *asoc,
3853 struct sctp_tmit_chunk *tp1)
3855 tp1->window_probe = 0;
3856 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3857 /* TSNs skipped; we do NOT move back. */
3858 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3859 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3861 (uint32_t)(uintptr_t)tp1->whoTo,
3865 /* First setup this by shrinking flight */
3866 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3867 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3870 sctp_flight_size_decrease(tp1);
3871 sctp_total_flight_decrease(stcb, tp1);
3872 /* Now mark for resend */
3873 tp1->sent = SCTP_DATAGRAM_RESEND;
3874 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3876 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3877 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3878 tp1->whoTo->flight_size,
3880 (uint32_t)(uintptr_t)tp1->whoTo,
3886 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3887 uint32_t rwnd, int *abort_now, int ecne_seen)
3889 struct sctp_nets *net;
3890 struct sctp_association *asoc;
3891 struct sctp_tmit_chunk *tp1, *tp2;
3893 int win_probe_recovery = 0;
3894 int win_probe_recovered = 0;
3895 int j, done_once = 0;
3899 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3900 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3901 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3903 SCTP_TCB_LOCK_ASSERT(stcb);
3904 #ifdef SCTP_ASOCLOG_OF_TSNS
3905 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3906 stcb->asoc.cumack_log_at++;
3907 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3908 stcb->asoc.cumack_log_at = 0;
3912 old_rwnd = asoc->peers_rwnd;
3913 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3916 } else if (asoc->last_acked_seq == cumack) {
3917 /* Window update sack */
3918 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3919 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
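/*
 * Worked example with hypothetical numbers: if the peer advertises
 * rwnd = 2000 while total_flight = 1500 and two chunks are
 * outstanding with sctp_peer_chunk_oh = 256, the usable window is
 * 2000 - (1500 + 2 * 256) = 2000 - 2012, which sctp_sbspace_sub
 * clamps to 0, so the SWS check below keeps the sender from
 * dribbling out tiny packets.
 */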
3920 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3921 /* SWS sender side engages */
3922 asoc->peers_rwnd = 0;
3924 if (asoc->peers_rwnd > old_rwnd) {
3929 /* First setup for CC stuff */
3930 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3931 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3932 /* Drag along the window_tsn for cwr's */
3933 net->cwr_window_tsn = cumack;
3935 net->prev_cwnd = net->cwnd;
3940 * CMT: Reset CUC and Fast recovery algo variables before
3943 net->new_pseudo_cumack = 0;
3944 net->will_exit_fast_recovery = 0;
3945 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3946 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3949 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3950 tp1 = TAILQ_LAST(&asoc->sent_queue,
3951 sctpchunk_listhead);
3952 send_s = tp1->rec.data.tsn + 1;
3954 send_s = asoc->sending_seq;
3956 if (SCTP_TSN_GE(cumack, send_s)) {
3957 struct mbuf *op_err;
3958 char msg[SCTP_DIAG_INFO_LEN];
3962 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3964 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3965 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3966 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3969 asoc->this_sack_highest_gap = cumack;
3970 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3971 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3972 stcb->asoc.overall_error_count,
3974 SCTP_FROM_SCTP_INDATA,
3977 stcb->asoc.overall_error_count = 0;
3978 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3979 /* process the new consecutive TSN first */
3980 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3981 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3982 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3983 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3985 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3987 * If it is less than ACKED, it is
3988 * now no longer in flight. Higher
3989 * values may occur during marking
3991 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3992 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3993 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3994 tp1->whoTo->flight_size,
3996 (uint32_t)(uintptr_t)tp1->whoTo,
3999 sctp_flight_size_decrease(tp1);
4000 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4001 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4004 /* sa_ignore NO_NULL_CHK */
4005 sctp_total_flight_decrease(stcb, tp1);
4007 tp1->whoTo->net_ack += tp1->send_size;
4008 if (tp1->snd_count < 2) {
4010 * True non-retransmitted
4013 tp1->whoTo->net_ack2 +=
4016 /* update RTO too? */
4024 sctp_calculate_rto(stcb,
4026 &tp1->sent_rcv_time,
4027 sctp_align_safe_nocopy,
4028 SCTP_RTT_FROM_DATA);
4031 if (tp1->whoTo->rto_needed == 0) {
4032 tp1->whoTo->rto_needed = 1;
4038 * CMT: CUCv2 algorithm. From the
4039 * cumack'd TSNs, for each TSN being
4040 * acked for the first time, set the
4041 * following variables for the
4042 * corresp destination.
4043 * new_pseudo_cumack will trigger a
4045 * find_(rtx_)pseudo_cumack will
4046 * trigger search for the next
4047 * expected (rtx-)pseudo-cumack.
4049 tp1->whoTo->new_pseudo_cumack = 1;
4050 tp1->whoTo->find_pseudo_cumack = 1;
4051 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4053 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4054 /* sa_ignore NO_NULL_CHK */
4055 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4058 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4059 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4061 if (tp1->rec.data.chunk_was_revoked) {
4062 /* deflate the cwnd */
4063 tp1->whoTo->cwnd -= tp1->book_size;
4064 tp1->rec.data.chunk_was_revoked = 0;
4066 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4067 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4068 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4071 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4075 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4076 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4077 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4078 asoc->trigger_reset = 1;
4080 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4082 /* sa_ignore NO_NULL_CHK */
4083 sctp_free_bufspace(stcb, asoc, tp1, 1);
4084 sctp_m_freem(tp1->data);
4087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4088 sctp_log_sack(asoc->last_acked_seq,
4093 SCTP_LOG_FREE_SENT);
4095 asoc->sent_queue_cnt--;
4096 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4103 /* sa_ignore NO_NULL_CHK */
4104 if (stcb->sctp_socket) {
4105 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4109 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4111 /* sa_ignore NO_NULL_CHK */
4112 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4114 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4115 so = SCTP_INP_SO(stcb->sctp_ep);
4116 atomic_add_int(&stcb->asoc.refcnt, 1);
4117 SCTP_TCB_UNLOCK(stcb);
4118 SCTP_SOCKET_LOCK(so, 1);
4119 SCTP_TCB_LOCK(stcb);
4120 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4121 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4122 /* assoc was freed while we were unlocked */
4123 SCTP_SOCKET_UNLOCK(so, 1);
4127 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4128 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4129 SCTP_SOCKET_UNLOCK(so, 1);
4132 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4133 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4137 /* JRS - Use the congestion control given in the CC module */
4138 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4139 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4140 if (net->net_ack2 > 0) {
4142 * Karn's rule applies to clearing error
4143 * count; this is optional.
4145 net->error_count = 0;
4146 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4147 /* addr came good */
4148 net->dest_state |= SCTP_ADDR_REACHABLE;
4149 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4150 0, (void *)net, SCTP_SO_NOT_LOCKED);
4152 if (net == stcb->asoc.primary_destination) {
4153 if (stcb->asoc.alternate) {
4155 * release the alternate,
4158 sctp_free_remote_addr(stcb->asoc.alternate);
4159 stcb->asoc.alternate = NULL;
4162 if (net->dest_state & SCTP_ADDR_PF) {
4163 net->dest_state &= ~SCTP_ADDR_PF;
4164 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4165 stcb->sctp_ep, stcb, net,
4166 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4167 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4168 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4169 /* Done with this net */
4172 /* restore any doubled timers */
4173 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4174 if (net->RTO < stcb->asoc.minrto) {
4175 net->RTO = stcb->asoc.minrto;
4177 if (net->RTO > stcb->asoc.maxrto) {
4178 net->RTO = stcb->asoc.maxrto;
4182 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4184 asoc->last_acked_seq = cumack;
4186 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4187 /* nothing left in-flight */
4188 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4189 net->flight_size = 0;
4190 net->partial_bytes_acked = 0;
4192 asoc->total_flight = 0;
4193 asoc->total_flight_count = 0;
4196 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4197 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4198 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4199 /* SWS sender side engages */
4200 asoc->peers_rwnd = 0;
4202 if (asoc->peers_rwnd > old_rwnd) {
4203 win_probe_recovery = 1;
4205 /* Now assure a timer where data is queued at */
4208 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4211 if (win_probe_recovery && (net->window_probe)) {
4212 win_probe_recovered = 1;
4214 * Find first chunk that was used with window probe
4215 * and clear the sent
4217 /* sa_ignore FREED_MEMORY */
4218 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4219 if (tp1->window_probe) {
4220 /* move back to data send queue */
4221 sctp_window_probe_recovery(stcb, asoc, tp1);
4226 if (net->RTO == 0) {
4227 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4229 to_ticks = MSEC_TO_TICKS(net->RTO);
4231 if (net->flight_size) {
4233 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4234 sctp_timeout_handler, &net->rxt_timer);
4235 if (net->window_probe) {
4236 net->window_probe = 0;
4239 if (net->window_probe) {
4241 * In window probes we must assure a timer
4242 * is still running there
4244 net->window_probe = 0;
4245 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4246 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4247 sctp_timeout_handler, &net->rxt_timer);
4249 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4250 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4252 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4257 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4258 (asoc->sent_queue_retran_cnt == 0) &&
4259 (win_probe_recovered == 0) &&
4262 * huh, this should not happen unless all packets are
4263 * PR-SCTP and marked to skip of course.
4265 if (sctp_fs_audit(asoc)) {
4266 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4267 net->flight_size = 0;
4269 asoc->total_flight = 0;
4270 asoc->total_flight_count = 0;
4271 asoc->sent_queue_retran_cnt = 0;
4272 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4273 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4274 sctp_flight_size_increase(tp1);
4275 sctp_total_flight_increase(stcb, tp1);
4276 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4277 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4284 /**********************************/
4285 /* Now what about shutdown issues */
4286 /**********************************/
4287 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4288 /* nothing left on sendqueue.. consider done */
4290 if ((asoc->stream_queue_cnt == 1) &&
4291 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4292 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4293 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4294 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4296 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4297 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4298 (asoc->stream_queue_cnt == 1) &&
4299 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4300 struct mbuf *op_err;
4304 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4305 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4306 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4309 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4310 (asoc->stream_queue_cnt == 0)) {
4311 struct sctp_nets *netp;
4313 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4314 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4315 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4317 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4318 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4319 sctp_stop_timers_for_shutdown(stcb);
4320 if (asoc->alternate) {
4321 netp = asoc->alternate;
4323 netp = asoc->primary_destination;
4325 sctp_send_shutdown(stcb, netp);
4326 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4327 stcb->sctp_ep, stcb, netp);
4328 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4329 stcb->sctp_ep, stcb, netp);
4330 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4331 (asoc->stream_queue_cnt == 0)) {
4332 struct sctp_nets *netp;
4334 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4335 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4336 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4337 sctp_stop_timers_for_shutdown(stcb);
4338 if (asoc->alternate) {
4339 netp = asoc->alternate;
4341 netp = asoc->primary_destination;
4343 sctp_send_shutdown_ack(stcb, netp);
4344 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4345 stcb->sctp_ep, stcb, netp);
4348 /*********************************************/
4349 /* Here we perform PR-SCTP procedures */
4351 /*********************************************/
4352 /* C1. update advancedPeerAckPoint */
4353 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4354 asoc->advanced_peer_ack_point = cumack;
4356 /* PR-Sctp issues need to be addressed too */
4357 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4358 struct sctp_tmit_chunk *lchk;
4359 uint32_t old_adv_peer_ack_point;
4361 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4362 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4363 /* C3. See if we need to send a Fwd-TSN */
4364 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4366 * ISSUE with ECN, see FWD-TSN processing.
4368 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4369 send_forward_tsn(stcb, asoc);
4371 /* try to FR fwd-tsn's that get lost too */
4372 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4373 send_forward_tsn(stcb, asoc);
4378 /* Assure a timer is up */
4379 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4380 stcb->sctp_ep, stcb, lchk->whoTo);
4383 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4384 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4386 stcb->asoc.peers_rwnd,
4387 stcb->asoc.total_flight,
4388 stcb->asoc.total_output_queue_size);
4393 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4394 struct sctp_tcb *stcb,
4395 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4396 int *abort_now, uint8_t flags,
4397 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4399 struct sctp_association *asoc;
4400 struct sctp_tmit_chunk *tp1, *tp2;
4401 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4402 uint16_t wake_him = 0;
4403 uint32_t send_s = 0;
4405 int accum_moved = 0;
4406 int will_exit_fast_recovery = 0;
4407 uint32_t a_rwnd, old_rwnd;
4408 int win_probe_recovery = 0;
4409 int win_probe_recovered = 0;
4410 struct sctp_nets *net = NULL;
4413 uint8_t reneged_all = 0;
4414 uint8_t cmt_dac_flag;
4417 * we take any chance we can to service our queues since we cannot
4418 * get awoken when the socket is read from :<
4421 * Now perform the actual SACK handling: 1) Verify that it is not an
4422 * old sack, if so discard. 2) If there is nothing left in the send
4423 * queue (cum-ack is equal to last acked) then you have a duplicate
4424 * too, update any rwnd change and verify no timers are running.
4425 * then return. 3) Process any new consequtive data i.e. cum-ack
4426 * moved process these first and note that it moved. 4) Process any
4427 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4428 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4429 * sync up flightsizes and things, stop all timers and also check
4430 * for shutdown_pending state. If so then go ahead and send off the
4431 * shutdown. If in shutdown recv, send off the shutdown-ack and
4432 * start that timer, Ret. 9) Strike any non-acked things and do FR
4433 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4434 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4435 * if in shutdown_recv state.
4437 SCTP_TCB_LOCK_ASSERT(stcb);
4439 this_sack_lowest_newack = 0;
4440 SCTP_STAT_INCR(sctps_slowpath_sack);
4442 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4443 #ifdef SCTP_ASOCLOG_OF_TSNS
4444 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4445 stcb->asoc.cumack_log_at++;
4446 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4447 stcb->asoc.cumack_log_at = 0;
4452 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4453 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4454 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4456 old_rwnd = stcb->asoc.peers_rwnd;
4457 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4458 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4459 stcb->asoc.overall_error_count,
4461 SCTP_FROM_SCTP_INDATA,
4464 stcb->asoc.overall_error_count = 0;
4466 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4467 sctp_log_sack(asoc->last_acked_seq,
4474 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4476 uint32_t *dupdata, dblock;
4478 for (i = 0; i < num_dup; i++) {
4479 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4480 sizeof(uint32_t), (uint8_t *)&dblock);
4481 if (dupdata == NULL) {
4484 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4488 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4489 tp1 = TAILQ_LAST(&asoc->sent_queue,
4490 sctpchunk_listhead);
4491 send_s = tp1->rec.data.tsn + 1;
4494 send_s = asoc->sending_seq;
4496 if (SCTP_TSN_GE(cum_ack, send_s)) {
4497 struct mbuf *op_err;
4498 char msg[SCTP_DIAG_INFO_LEN];
4501 * no way, we have not even sent this TSN out yet. Peer is
4502 * hopelessly messed up with us.
4504 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4507 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4508 tp1->rec.data.tsn, (void *)tp1);
4513 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4515 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4516 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4517 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4520 /**********************/
4521 /* 1) check the range */
4522 /**********************/
4523 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4524 /* acking something behind */
4527 /* update the Rwnd of the peer */
4528 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4529 TAILQ_EMPTY(&asoc->send_queue) &&
4530 (asoc->stream_queue_cnt == 0)) {
4531 /* nothing left on send/sent and strmq */
4532 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4533 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4534 asoc->peers_rwnd, 0, 0, a_rwnd);
4536 asoc->peers_rwnd = a_rwnd;
4537 if (asoc->sent_queue_retran_cnt) {
4538 asoc->sent_queue_retran_cnt = 0;
4540 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4541 /* SWS sender side engages */
4542 asoc->peers_rwnd = 0;
4544 /* stop any timers */
4545 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4546 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4547 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4548 net->partial_bytes_acked = 0;
4549 net->flight_size = 0;
4551 asoc->total_flight = 0;
4552 asoc->total_flight_count = 0;
4556 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4557 * things. The total byte count acked is tracked in netAckSz AND
4558 * netAck2 is used to track the total bytes acked that are un-
4559 * amibguious and were never retransmitted. We track these on a per
4560 * destination address basis.
4562 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4563 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4564 /* Drag along the window_tsn for cwr's */
4565 net->cwr_window_tsn = cum_ack;
4567 net->prev_cwnd = net->cwnd;
4572 * CMT: Reset CUC and Fast recovery algo variables before
4575 net->new_pseudo_cumack = 0;
4576 net->will_exit_fast_recovery = 0;
4577 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4578 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4581 /* process the new consecutive TSN first */
4582 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4583 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4584 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4586 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4588 * If it is less than ACKED, it is
4589 * now no-longer in flight. Higher
4590 * values may occur during marking
4592 if ((tp1->whoTo->dest_state &
4593 SCTP_ADDR_UNCONFIRMED) &&
4594 (tp1->snd_count < 2)) {
4596 * If there was no retran
4597 * and the address is
4598 * un-confirmed and we sent
4600 * sacked.. its confirmed,
4603 tp1->whoTo->dest_state &=
4604 ~SCTP_ADDR_UNCONFIRMED;
4606 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4608 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4609 tp1->whoTo->flight_size,
4611 (uint32_t)(uintptr_t)tp1->whoTo,
4614 sctp_flight_size_decrease(tp1);
4615 sctp_total_flight_decrease(stcb, tp1);
4616 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4617 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4621 tp1->whoTo->net_ack += tp1->send_size;
4623 /* CMT SFR and DAC algos */
4624 this_sack_lowest_newack = tp1->rec.data.tsn;
4625 tp1->whoTo->saw_newack = 1;
4627 if (tp1->snd_count < 2) {
4629 * True non-retransmited
4632 tp1->whoTo->net_ack2 +=
4635 /* update RTO too? */
4639 sctp_calculate_rto(stcb,
4641 &tp1->sent_rcv_time,
4642 sctp_align_safe_nocopy,
4643 SCTP_RTT_FROM_DATA);
4646 if (tp1->whoTo->rto_needed == 0) {
4647 tp1->whoTo->rto_needed = 1;
4653 * CMT: CUCv2 algorithm. From the
4654 * cumack'd TSNs, for each TSN being
4655 * acked for the first time, set the
4656 * following variables for the
4657 * corresp destination.
4658 * new_pseudo_cumack will trigger a
4660 * find_(rtx_)pseudo_cumack will
4661 * trigger search for the next
4662 * expected (rtx-)pseudo-cumack.
4664 tp1->whoTo->new_pseudo_cumack = 1;
4665 tp1->whoTo->find_pseudo_cumack = 1;
4666 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4669 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4670 sctp_log_sack(asoc->last_acked_seq,
4675 SCTP_LOG_TSN_ACKED);
4677 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4678 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4681 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4682 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4683 #ifdef SCTP_AUDITING_ENABLED
4684 sctp_audit_log(0xB3,
4685 (asoc->sent_queue_retran_cnt & 0x000000ff));
4688 if (tp1->rec.data.chunk_was_revoked) {
4689 /* deflate the cwnd */
4690 tp1->whoTo->cwnd -= tp1->book_size;
4691 tp1->rec.data.chunk_was_revoked = 0;
4693 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4694 tp1->sent = SCTP_DATAGRAM_ACKED;
4701 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4702 /* always set this up to cum-ack */
4703 asoc->this_sack_highest_gap = last_tsn;
4705 if ((num_seg > 0) || (num_nr_seg > 0)) {
4708 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4709 * to be greater than the cumack. Also reset saw_newack to 0
4712 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4713 net->saw_newack = 0;
4714 net->this_sack_highest_newack = last_tsn;
4718 * thisSackHighestGap will increase while handling NEW
4719 * segments this_sack_highest_newack will increase while
4720 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4721 * used for CMT DAC algo. saw_newack will also change.
4723 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4724 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4725 num_seg, num_nr_seg, &rto_ok)) {
4729 * validate the biggest_tsn_acked in the gap acks if strict
4730 * adherence is wanted.
4732 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4734 * peer is either confused or we are under attack.
4737 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4738 biggest_tsn_acked, send_s);
4742 /*******************************************/
4743 /* cancel ALL T3-send timer if accum moved */
4744 /*******************************************/
4745 if (asoc->sctp_cmt_on_off > 0) {
4746 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4747 if (net->new_pseudo_cumack)
4748 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4750 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4755 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4756 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4757 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4761 /********************************************/
4762 /* drop the acked chunks from the sentqueue */
4763 /********************************************/
4764 asoc->last_acked_seq = cum_ack;
4766 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4767 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4770 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4771 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4772 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4775 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4779 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4780 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4781 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4782 asoc->trigger_reset = 1;
4784 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4785 if (PR_SCTP_ENABLED(tp1->flags)) {
4786 if (asoc->pr_sctp_cnt != 0)
4787 asoc->pr_sctp_cnt--;
4789 asoc->sent_queue_cnt--;
4791 /* sa_ignore NO_NULL_CHK */
4792 sctp_free_bufspace(stcb, asoc, tp1, 1);
4793 sctp_m_freem(tp1->data);
4795 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4796 asoc->sent_queue_cnt_removeable--;
4799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4800 sctp_log_sack(asoc->last_acked_seq,
4805 SCTP_LOG_FREE_SENT);
4807 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4810 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4812 panic("Warning flight size is positive and should be 0");
4814 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4815 asoc->total_flight);
4817 asoc->total_flight = 0;
4819 /* sa_ignore NO_NULL_CHK */
4820 if ((wake_him) && (stcb->sctp_socket)) {
4821 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4825 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4826 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4827 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4829 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4830 so = SCTP_INP_SO(stcb->sctp_ep);
4831 atomic_add_int(&stcb->asoc.refcnt, 1);
4832 SCTP_TCB_UNLOCK(stcb);
4833 SCTP_SOCKET_LOCK(so, 1);
4834 SCTP_TCB_LOCK(stcb);
4835 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4836 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4837 /* assoc was freed while we were unlocked */
4838 SCTP_SOCKET_UNLOCK(so, 1);
4842 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4843 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4844 SCTP_SOCKET_UNLOCK(so, 1);
4847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4848 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4852 if (asoc->fast_retran_loss_recovery && accum_moved) {
4853 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4854 /* Setup so we will exit RFC2582 fast recovery */
4855 will_exit_fast_recovery = 1;
4859 * Check for revoked fragments:
4861 * if Previous sack - Had no frags then we can't have any revoked if
4862 * Previous sack - Had frag's then - If we now have frags aka
4863 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4864 * some of them. else - The peer revoked all ACKED fragments, since
4865 * we had some before and now we have NONE.
4869 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4870 asoc->saw_sack_with_frags = 1;
4871 } else if (asoc->saw_sack_with_frags) {
4872 int cnt_revoked = 0;
4874 /* Peer revoked all dg's marked or acked */
4875 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4876 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4877 tp1->sent = SCTP_DATAGRAM_SENT;
4878 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4879 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4880 tp1->whoTo->flight_size,
4882 (uint32_t)(uintptr_t)tp1->whoTo,
4885 sctp_flight_size_increase(tp1);
4886 sctp_total_flight_increase(stcb, tp1);
4887 tp1->rec.data.chunk_was_revoked = 1;
4889 * To ensure that this increase in
4890 * flightsize, which is artificial, does not
4891 * throttle the sender, we also increase the
4892 * cwnd artificially.
4894 tp1->whoTo->cwnd += tp1->book_size;
4901 asoc->saw_sack_with_frags = 0;
4904 asoc->saw_sack_with_nr_frags = 1;
4906 asoc->saw_sack_with_nr_frags = 0;
4908 /* JRS - Use the congestion control given in the CC module */
4909 if (ecne_seen == 0) {
4910 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4911 if (net->net_ack2 > 0) {
4913 * Karn's rule applies to clearing error
4914 * count, this is optional.
4916 net->error_count = 0;
4917 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4918 /* addr came good */
4919 net->dest_state |= SCTP_ADDR_REACHABLE;
4920 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4921 0, (void *)net, SCTP_SO_NOT_LOCKED);
4923 if (net == stcb->asoc.primary_destination) {
4924 if (stcb->asoc.alternate) {
4926 * release the alternate,
4929 sctp_free_remote_addr(stcb->asoc.alternate);
4930 stcb->asoc.alternate = NULL;
4933 if (net->dest_state & SCTP_ADDR_PF) {
4934 net->dest_state &= ~SCTP_ADDR_PF;
4935 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4936 stcb->sctp_ep, stcb, net,
4937 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4938 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4939 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4940 /* Done with this net */
4943 /* restore any doubled timers */
4944 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4945 if (net->RTO < stcb->asoc.minrto) {
4946 net->RTO = stcb->asoc.minrto;
4948 if (net->RTO > stcb->asoc.maxrto) {
4949 net->RTO = stcb->asoc.maxrto;
4953 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4955 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4956 /* nothing left in-flight */
4957 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4958 /* stop all timers */
4959 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4961 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4962 net->flight_size = 0;
4963 net->partial_bytes_acked = 0;
4965 asoc->total_flight = 0;
4966 asoc->total_flight_count = 0;
4968 /**********************************/
4969 /* Now what about shutdown issues */
4970 /**********************************/
4971 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4972 /* nothing left on sendqueue.. consider done */
4973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4974 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4975 asoc->peers_rwnd, 0, 0, a_rwnd);
4977 asoc->peers_rwnd = a_rwnd;
4978 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4979 /* SWS sender side engages */
4980 asoc->peers_rwnd = 0;
4983 if ((asoc->stream_queue_cnt == 1) &&
4984 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4985 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4986 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4987 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4989 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4990 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4991 (asoc->stream_queue_cnt == 1) &&
4992 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4993 struct mbuf *op_err;
4997 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4998 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4999 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5002 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5003 (asoc->stream_queue_cnt == 0)) {
5004 struct sctp_nets *netp;
5006 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5007 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5008 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5010 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5011 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5012 sctp_stop_timers_for_shutdown(stcb);
5013 if (asoc->alternate) {
5014 netp = asoc->alternate;
5016 netp = asoc->primary_destination;
5018 sctp_send_shutdown(stcb, netp);
5019 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5020 stcb->sctp_ep, stcb, netp);
5021 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5022 stcb->sctp_ep, stcb, netp);
5024 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5025 (asoc->stream_queue_cnt == 0)) {
5026 struct sctp_nets *netp;
5028 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5029 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5030 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5031 sctp_stop_timers_for_shutdown(stcb);
5032 if (asoc->alternate) {
5033 netp = asoc->alternate;
5035 netp = asoc->primary_destination;
5037 sctp_send_shutdown_ack(stcb, netp);
5038 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5039 stcb->sctp_ep, stcb, netp);
5044 * Now here we are going to recycle net_ack for a different use...
5047 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5052 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5053 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5054 * automatically ensure that.
5056 if ((asoc->sctp_cmt_on_off > 0) &&
5057 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5058 (cmt_dac_flag == 0)) {
5059 this_sack_lowest_newack = cum_ack;
5061 if ((num_seg > 0) || (num_nr_seg > 0)) {
5062 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5063 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5065 /* JRS - Use the congestion control given in the CC module */
5066 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5068 /* Now are we exiting loss recovery ? */
5069 if (will_exit_fast_recovery) {
5070 /* Ok, we must exit fast recovery */
5071 asoc->fast_retran_loss_recovery = 0;
5073 if ((asoc->sat_t3_loss_recovery) &&
5074 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5075 /* end satellite t3 loss recovery */
5076 asoc->sat_t3_loss_recovery = 0;
5081 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5082 if (net->will_exit_fast_recovery) {
5083 /* Ok, we must exit fast recovery */
5084 net->fast_retran_loss_recovery = 0;
5088 /* Adjust and set the new rwnd value */
5089 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5090 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5091 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5093 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5094 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5095 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5096 /* SWS sender side engages */
5097 asoc->peers_rwnd = 0;
5099 if (asoc->peers_rwnd > old_rwnd) {
5100 win_probe_recovery = 1;
5103 * Now we must setup so we have a timer up for anyone with
5109 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5110 if (win_probe_recovery && (net->window_probe)) {
5111 win_probe_recovered = 1;
5113 * Find first chunk that was used with
5114 * window probe and clear the event. Put
5115 * it back into the send queue as if has
5118 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5119 if (tp1->window_probe) {
5120 sctp_window_probe_recovery(stcb, asoc, tp1);
5125 if (net->flight_size) {
5127 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5128 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5129 stcb->sctp_ep, stcb, net);
5131 if (net->window_probe) {
5132 net->window_probe = 0;
5135 if (net->window_probe) {
5137 * In window probes we must assure a timer
5138 * is still running there
5140 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5141 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5142 stcb->sctp_ep, stcb, net);
5145 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5146 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5148 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5153 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5154 (asoc->sent_queue_retran_cnt == 0) &&
5155 (win_probe_recovered == 0) &&
5158 * huh, this should not happen unless all packets are
5159 * PR-SCTP and marked to skip of course.
5161 if (sctp_fs_audit(asoc)) {
5162 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5163 net->flight_size = 0;
5165 asoc->total_flight = 0;
5166 asoc->total_flight_count = 0;
5167 asoc->sent_queue_retran_cnt = 0;
5168 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5169 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5170 sctp_flight_size_increase(tp1);
5171 sctp_total_flight_increase(stcb, tp1);
5172 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5173 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5180 /*********************************************/
5181 /* Here we perform PR-SCTP procedures */
5183 /*********************************************/
5184 /* C1. update advancedPeerAckPoint */
5185 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5186 asoc->advanced_peer_ack_point = cum_ack;
5188 /* C2. try to further move advancedPeerAckPoint ahead */
5189 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5190 struct sctp_tmit_chunk *lchk;
5191 uint32_t old_adv_peer_ack_point;
5193 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5194 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5195 /* C3. See if we need to send a Fwd-TSN */
5196 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5198 * ISSUE with ECN, see FWD-TSN processing.
5200 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5201 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5202 0xee, cum_ack, asoc->advanced_peer_ack_point,
5203 old_adv_peer_ack_point);
5205 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5206 send_forward_tsn(stcb, asoc);
5208 /* try to FR fwd-tsn's that get lost too */
5209 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5210 send_forward_tsn(stcb, asoc);
5215 /* Assure a timer is up */
5216 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5217 stcb->sctp_ep, stcb, lchk->whoTo);
5220 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5221 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5223 stcb->asoc.peers_rwnd,
5224 stcb->asoc.total_flight,
5225 stcb->asoc.total_output_queue_size);
5230 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5233 uint32_t cum_ack, a_rwnd;
5235 cum_ack = ntohl(cp->cumulative_tsn_ack);
5236 /* Arrange so a_rwnd does NOT change */
5237 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5239 /* Now call the express sack handling */
5240 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5244 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5245 struct sctp_stream_in *strmin)
5247 struct sctp_queued_to_read *control, *ncontrol;
5248 struct sctp_association *asoc;
5250 int need_reasm_check = 0;
5253 mid = strmin->last_mid_delivered;
5255 * First deliver anything prior to and including the stream no that
5258 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5259 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5260 /* this is deliverable now */
5261 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5262 if (control->on_strm_q) {
5263 if (control->on_strm_q == SCTP_ON_ORDERED) {
5264 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5265 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5266 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5269 panic("strmin: %p ctl: %p unknown %d",
5270 strmin, control, control->on_strm_q);
5273 control->on_strm_q = 0;
5275 /* subtract pending on streams */
5276 if (asoc->size_on_all_streams >= control->length) {
5277 asoc->size_on_all_streams -= control->length;
5280 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5282 asoc->size_on_all_streams = 0;
5285 sctp_ucount_decr(asoc->cnt_on_all_streams);
5286 /* deliver it to at least the delivery-q */
5287 if (stcb->sctp_socket) {
5288 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5289 sctp_add_to_readq(stcb->sctp_ep, stcb,
5291 &stcb->sctp_socket->so_rcv,
5292 1, SCTP_READ_LOCK_HELD,
5293 SCTP_SO_NOT_LOCKED);
5296 /* Its a fragmented message */
5297 if (control->first_frag_seen) {
5299 * Make it so this is next to
5300 * deliver, we restore later
5302 strmin->last_mid_delivered = control->mid - 1;
5303 need_reasm_check = 1;
5308 /* no more delivery now. */
5312 if (need_reasm_check) {
5315 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5316 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5317 /* Restore the next to deliver unless we are ahead */
5318 strmin->last_mid_delivered = mid;
5321 /* Left the front Partial one on */
5324 need_reasm_check = 0;
5327 * now we must deliver things in queue the normal way if any are
5330 mid = strmin->last_mid_delivered + 1;
5331 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5332 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5333 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5334 /* this is deliverable now */
5335 if (control->on_strm_q) {
5336 if (control->on_strm_q == SCTP_ON_ORDERED) {
5337 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5338 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5339 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5342 panic("strmin: %p ctl: %p unknown %d",
5343 strmin, control, control->on_strm_q);
5346 control->on_strm_q = 0;
5348 /* subtract pending on streams */
5349 if (asoc->size_on_all_streams >= control->length) {
5350 asoc->size_on_all_streams -= control->length;
5353 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5355 asoc->size_on_all_streams = 0;
5358 sctp_ucount_decr(asoc->cnt_on_all_streams);
5359 /* deliver it to at least the delivery-q */
5360 strmin->last_mid_delivered = control->mid;
5361 if (stcb->sctp_socket) {
5362 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5363 sctp_add_to_readq(stcb->sctp_ep, stcb,
5365 &stcb->sctp_socket->so_rcv, 1,
5366 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5369 mid = strmin->last_mid_delivered + 1;
5371 /* Its a fragmented message */
5372 if (control->first_frag_seen) {
5374 * Make it so this is next to
5377 strmin->last_mid_delivered = control->mid - 1;
5378 need_reasm_check = 1;
5386 if (need_reasm_check) {
5387 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5394 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5395 struct sctp_association *asoc,
5396 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5398 struct sctp_queued_to_read *control;
5399 struct sctp_stream_in *strm;
5400 struct sctp_tmit_chunk *chk, *nchk;
5401 int cnt_removed = 0;
5404 * For now large messages held on the stream reasm that are complete
5405 * will be tossed too. We could in theory do more work to spin
5406 * through and stop after dumping one msg aka seeing the start of a
5407 * new msg at the head, and call the delivery function... to see if
5408 * it can be delivered... But for now we just dump everything on the
5411 strm = &asoc->strmin[stream];
5412 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5413 if (control == NULL) {
5417 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5420 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5421 /* Purge hanging chunks */
5422 if (!asoc->idata_supported && (ordered == 0)) {
5423 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5428 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5429 if (asoc->size_on_reasm_queue >= chk->send_size) {
5430 asoc->size_on_reasm_queue -= chk->send_size;
5433 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5435 asoc->size_on_reasm_queue = 0;
5438 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5440 sctp_m_freem(chk->data);
5443 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5445 if (!TAILQ_EMPTY(&control->reasm)) {
5446 /* This has to be old data, unordered */
5447 if (control->data) {
5448 sctp_m_freem(control->data);
5449 control->data = NULL;
5451 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5452 chk = TAILQ_FIRST(&control->reasm);
5453 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5454 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5455 sctp_add_chk_to_control(control, strm, stcb, asoc,
5456 chk, SCTP_READ_LOCK_HELD);
5458 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5461 if (control->on_strm_q == SCTP_ON_ORDERED) {
5462 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5463 if (asoc->size_on_all_streams >= control->length) {
5464 asoc->size_on_all_streams -= control->length;
5467 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5469 asoc->size_on_all_streams = 0;
5472 sctp_ucount_decr(asoc->cnt_on_all_streams);
5473 control->on_strm_q = 0;
5474 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5475 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5476 control->on_strm_q = 0;
5478 } else if (control->on_strm_q) {
5479 panic("strm: %p ctl: %p unknown %d",
5480 strm, control, control->on_strm_q);
5483 control->on_strm_q = 0;
5484 if (control->on_read_q == 0) {
5485 sctp_free_remote_addr(control->whoFrom);
5486 if (control->data) {
5487 sctp_m_freem(control->data);
5488 control->data = NULL;
5490 sctp_free_a_readq(stcb, control);
5495 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5496 struct sctp_forward_tsn_chunk *fwd,
5497 int *abort_flag, struct mbuf *m, int offset)
5499 /* The pr-sctp fwd tsn */
5501 * here we will perform all the data receiver side steps for
5502 * processing FwdTSN, as required in by pr-sctp draft:
5504 * Assume we get FwdTSN(x):
5506 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5507 * + others we have 3) examine and update re-ordering queue on
5508 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5509 * report where we are.
5511 struct sctp_association *asoc;
5512 uint32_t new_cum_tsn, gap;
5513 unsigned int i, fwd_sz, m_size;
5515 struct sctp_stream_in *strm;
5516 struct sctp_queued_to_read *control, *sv;
5519 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5520 SCTPDBG(SCTP_DEBUG_INDATA1,
5521 "Bad size too small/big fwd-tsn\n");
5524 m_size = (stcb->asoc.mapping_array_size << 3);
5525 /*************************************************************/
5526 /* 1. Here we update local cumTSN and shift the bitmap array */
5527 /*************************************************************/
5528 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5530 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5531 /* Already got there ... */
5535 * now we know the new TSN is more advanced, let's find the actual
5538 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5539 asoc->cumulative_tsn = new_cum_tsn;
5540 if (gap >= m_size) {
5541 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5542 struct mbuf *op_err;
5543 char msg[SCTP_DIAG_INFO_LEN];
5546 * out of range (of single byte chunks in the rwnd I
5547 * give out). This must be an attacker.
5550 snprintf(msg, sizeof(msg),
5551 "New cum ack %8.8x too high, highest TSN %8.8x",
5552 new_cum_tsn, asoc->highest_tsn_inside_map);
5553 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5554 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5555 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5558 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5560 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5561 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5562 asoc->highest_tsn_inside_map = new_cum_tsn;
5564 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5565 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5567 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5568 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5571 SCTP_TCB_LOCK_ASSERT(stcb);
5572 for (i = 0; i <= gap; i++) {
5573 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5574 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5575 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5576 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5577 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5582 /*************************************************************/
5583 /* 2. Clear up re-assembly queue */
5584 /*************************************************************/
5586 /* This is now done as part of clearing up the stream/seq */
5587 if (asoc->idata_supported == 0) {
5590 /* Flush all the un-ordered data based on cum-tsn */
5591 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5592 for (sid = 0; sid < asoc->streamincnt; sid++) {
5593 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5595 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5597 /*******************************************************/
5598 /* 3. Update the PR-stream re-ordering queues and fix */
5599 /* delivery issues as needed. */
5600 /*******************************************************/
5601 fwd_sz -= sizeof(*fwd);
5604 unsigned int num_str;
5605 uint32_t mid, cur_mid;
5607 uint16_t ordered, flags;
5608 struct sctp_strseq *stseq, strseqbuf;
5609 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5611 offset += sizeof(*fwd);
5613 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5614 if (asoc->idata_supported) {
5615 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5617 num_str = fwd_sz / sizeof(struct sctp_strseq);
5619 for (i = 0; i < num_str; i++) {
5620 if (asoc->idata_supported) {
5621 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5622 sizeof(struct sctp_strseq_mid),
5623 (uint8_t *)&strseqbuf_m);
5624 offset += sizeof(struct sctp_strseq_mid);
5625 if (stseq_m == NULL) {
5628 sid = ntohs(stseq_m->sid);
5629 mid = ntohl(stseq_m->mid);
5630 flags = ntohs(stseq_m->flags);
5631 if (flags & PR_SCTP_UNORDERED_FLAG) {
5637 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5638 sizeof(struct sctp_strseq),
5639 (uint8_t *)&strseqbuf);
5640 offset += sizeof(struct sctp_strseq);
5641 if (stseq == NULL) {
5644 sid = ntohs(stseq->sid);
5645 mid = (uint32_t)ntohs(stseq->ssn);
5653 * Ok we now look for the stream/seq on the read
5654 * queue where its not all delivered. If we find it
5655 * we transmute the read entry into a PDI_ABORTED.
5657 if (sid >= asoc->streamincnt) {
5658 /* screwed up streams, stop! */
5661 if ((asoc->str_of_pdapi == sid) &&
5662 (asoc->ssn_of_pdapi == mid)) {
5664 * If this is the one we were partially
5665 * delivering now then we no longer are.
5666 * Note this will change with the reassembly
5669 asoc->fragmented_delivery_inprogress = 0;
5671 strm = &asoc->strmin[sid];
5672 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5673 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5675 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5676 if ((control->sinfo_stream == sid) &&
5677 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5678 str_seq = (sid << 16) | (0x0000ffff & mid);
5679 control->pdapi_aborted = 1;
5680 sv = stcb->asoc.control_pdapi;
5681 control->end_added = 1;
5682 if (control->on_strm_q == SCTP_ON_ORDERED) {
5683 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5684 if (asoc->size_on_all_streams >= control->length) {
5685 asoc->size_on_all_streams -= control->length;
5688 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5690 asoc->size_on_all_streams = 0;
5693 sctp_ucount_decr(asoc->cnt_on_all_streams);
5694 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5695 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5697 } else if (control->on_strm_q) {
5698 panic("strm: %p ctl: %p unknown %d",
5699 strm, control, control->on_strm_q);
5702 control->on_strm_q = 0;
5703 stcb->asoc.control_pdapi = control;
5704 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5706 SCTP_PARTIAL_DELIVERY_ABORTED,
5708 SCTP_SO_NOT_LOCKED);
5709 stcb->asoc.control_pdapi = sv;
5711 } else if ((control->sinfo_stream == sid) &&
5712 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5713 /* We are past our victim SSN */
5717 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5718 /* Update the sequence number */
5719 strm->last_mid_delivered = mid;
5721 /* now kick the stream the new way */
5722 /* sa_ignore NO_NULL_CHK */
5723 sctp_kick_prsctp_reorder_queue(stcb, strm);
5725 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5728 * Now slide thing forward.
5730 sctp_slide_mapping_arrays(stcb);