2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a sack into the chunk queue (if I have data to
58 * send that is and will be sending it .. for bundling.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int hold_rlock);
/*
 * Recompute the association's advertised receive window and cache it
 * in asoc->my_rwnd, delegating the arithmetic to sctp_calc_rwnd().
 * NOTE(review): the return-type line and the function braces are not
 * visible in this excerpt — lines appear to be missing.
 */
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
/*
 * Compute the receive window the association could advertise:
 *  - full window (socket rcv limit, at least SCTP_MINIMAL_RWND) when
 *    nothing is buffered anywhere;
 *  - otherwise the actual socket-buffer space minus data (plus
 *    per-chunk MSIZE mbuf overhead) still held on the reassembly and
 *    per-stream queues, minus control overhead (my_rwnd_control_len).
 * NOTE(review): braces, `return` statements and the return-type line
 * are missing from this excerpt; the trailing SWS clamp body is cut off.
 */
78 /* Calculate what the rwnd would be */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone as put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
/* No socket: nothing meaningful to compute (early-out path; body not visible). */
90 if (stcb->sctp_socket == NULL) {
/* Sanity: a zero chunk count must imply a zero byte count on each queue. */
94 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->cnt_on_reasm_queue == 0 &&
100 asoc->cnt_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
108 * take out what has NOT been put on socket queue and we yet hold
/* Each queued chunk is charged its payload plus one MSIZE of mbuf overhead. */
111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 asoc->cnt_on_reasm_queue * MSIZE));
113 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 asoc->cnt_on_all_streams * MSIZE));
120 /* what is the overhead of all these rwnd's */
121 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
123 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 * even it is 0. SWS engaged
126 if (calc < stcb->asoc.my_rwnd_control_len) {
/*
 * Allocate and initialize a sctp_queued_to_read entry describing one
 * incoming message: stream id, flags (shifted into the upper byte of
 * sinfo_flags), ppid, context, tsn (also as cumtsn), association id,
 * MID, and sentinel FSN values (0xffffffff = "none included yet").
 * Takes a reference on the source net. Returns NULL on allocation
 * failure (failure branch not visible in this excerpt).
 * NOTE(review): the parameter line declaring the data mbuf (`dm`) and
 * several braces/returns appear to be missing from this excerpt.
 */
135 * Build out our readq entry based on the incoming packet.
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139 struct sctp_nets *net,
140 uint32_t tsn, uint32_t ppid,
141 uint32_t context, uint16_t sid,
142 uint32_t mid, uint8_t flags,
145 struct sctp_queued_to_read *read_queue_e = NULL;
147 sctp_alloc_a_readq(stcb, read_queue_e);
148 if (read_queue_e == NULL) {
151 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 read_queue_e->sinfo_stream = sid;
/* Chunk flags live in the upper byte of sinfo_flags by convention here. */
153 read_queue_e->sinfo_flags = (flags << 8);
154 read_queue_e->sinfo_ppid = ppid;
155 read_queue_e->sinfo_context = context;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->mid = mid;
/* 0xffffffff is the "no fragment seen yet" sentinel for FSN tracking. */
160 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 TAILQ_INIT(&read_queue_e->reasm);
162 read_queue_e->whoFrom = net;
163 atomic_add_int(&net->ref_count, 1);
164 read_queue_e->data = dm;
165 read_queue_e->stcb = stcb;
166 read_queue_e->port_from = stcb->rport;
/* If the tcb is being torn down, do not hold a ref on it from the readq. */
167 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168 read_queue_e->do_not_ref_stcb = 1;
171 return (read_queue_e);
/*
 * Build the ancillary-data (cmsg) mbuf delivered with a received
 * message, honoring the endpoint's feature flags:
 *  - SCTP_RCVINFO  when SCTP_PCB_FLAGS_RECVRCVINFO is on;
 *  - SCTP_NXTINFO  when SCTP_PCB_FLAGS_RECVNXTINFO is on and the
 *    extended info says a next message is available;
 *  - SCTP_SNDRCV / SCTP_EXTRCV for the legacy DATAIOEVNT interface.
 * Total length is sized first with CMSG_SPACE, then one mbuf is
 * allocated and each cmsg is appended in turn, advancing `cmh`.
 * Returns the mbuf, or NULL (paths not visible in this excerpt).
 * NOTE(review): declarations of `len`, `cmh`, `ret` and multiple
 * braces/returns appear to be missing from this excerpt.
 */
175 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
177 struct sctp_extrcvinfo *seinfo;
178 struct sctp_sndrcvinfo *outinfo;
179 struct sctp_rcvinfo *rcvinfo;
180 struct sctp_nxtinfo *nxtinfo;
/* Fast path: none of the three delivery options enabled -> no cmsgs at all. */
187 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
188 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
189 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
190 /* user does not want any ancillary data */
/* Sizing pass: accumulate CMSG_SPACE for each cmsg we will emit. */
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
196 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* sinfo is actually carried in an extended structure; view it as such. */
198 seinfo = (struct sctp_extrcvinfo *)sinfo;
199 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
200 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
202 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
206 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
207 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
209 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
212 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/* Single mbuf to hold the full cmsg chain. */
218 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
223 SCTP_BUF_LEN(ret) = 0;
225 /* We need a CMSG header followed by the struct */
226 cmh = mtod(ret, struct cmsghdr *);
228 * Make sure that there is no un-initialized padding between the
229 * cmsg header and cmsg data and after the cmsg data.
/* Fill pass: SCTP_RCVINFO cmsg, copied field-by-field from sinfo. */
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
233 cmh->cmsg_level = IPPROTO_SCTP;
234 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
235 cmh->cmsg_type = SCTP_RCVINFO;
236 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
237 rcvinfo->rcv_sid = sinfo->sinfo_stream;
238 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
239 rcvinfo->rcv_flags = sinfo->sinfo_flags;
240 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
241 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
242 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
243 rcvinfo->rcv_context = sinfo->sinfo_context;
244 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
/* Advance cmh past this cmsg (header + padded data) and grow the mbuf. */
245 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
246 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO cmsg: describes the message queued behind this one. */
249 cmh->cmsg_level = IPPROTO_SCTP;
250 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
251 cmh->cmsg_type = SCTP_NXTINFO;
252 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
253 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
254 nxtinfo->nxt_flags = 0;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
256 nxtinfo->nxt_flags |= SCTP_UNORDERED;
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
259 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
261 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
262 nxtinfo->nxt_flags |= SCTP_COMPLETE;
264 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
265 nxtinfo->nxt_length = seinfo->serinfo_next_length;
266 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
267 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
268 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy interface: SCTP_EXTRCV (extended) or SCTP_SNDRCV cmsg. */
270 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
271 cmh->cmsg_level = IPPROTO_SCTP;
272 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
274 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
275 cmh->cmsg_type = SCTP_EXTRCV;
276 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
277 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
279 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
280 cmh->cmsg_type = SCTP_SNDRCV;
282 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a TSN from the revokable mapping array to the non-revokable
 * (nr) mapping array once it can no longer be renege'd. No-op when
 * draining is disabled or the TSN is at/behind the cumulative ack.
 * If the TSN was the highest bit in the revokable map, walk backwards
 * to find the new highest; if none remain, reset the highest to one
 * below the mapping array base.
 * NOTE(review): declarations of `gap`, `i`, `in_r`, `in_nr` and
 * several braces/returns appear to be missing from this excerpt.
 */
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
295 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
298 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
300 * This tsn is behind the cum ack and thus we don't need to
301 * worry about it being moved from one to the other.
/* Translate the TSN into a bit offset within the mapping arrays. */
305 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
306 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
307 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* The TSN must already be recorded in at least one of the two maps. */
308 KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __FUNCTION__));
310 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
311 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
312 asoc->highest_tsn_inside_nr_map = tsn;
316 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
317 if (tsn == asoc->highest_tsn_inside_map) {
318 /* We must back down to see what the new highest is. */
319 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
320 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
321 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
322 asoc->highest_tsn_inside_map = i;
/* Walked past the base without finding a set bit: map is now empty. */
326 if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
327 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert a read-queue control entry into the proper per-stream queue
 * (unordered vs ordered), keeping the queue sorted by MID. For the
 * pre-I-DATA ("old style") protocol, unordered entries simply go to
 * the tail. A duplicate MID is an error (the abort comment at the
 * duplicate branch suggests a non-zero return; the return statement
 * itself is not visible in this excerpt).
 * NOTE(review): queue-selection lines for the ordered case, braces,
 * and return statements appear to be missing from this excerpt.
 */
334 sctp_place_control_in_stream(struct sctp_stream_in *strm,
335 struct sctp_association *asoc,
336 struct sctp_queued_to_read *control)
338 struct sctp_queued_to_read *at;
339 struct sctp_readhead *q;
340 uint8_t flags, unordered;
/* Chunk flags are stored in the upper byte of sinfo_flags. */
342 flags = (control->sinfo_flags >> 8);
343 unordered = flags & SCTP_DATA_UNORDERED;
345 q = &strm->uno_inqueue;
346 if (asoc->idata_supported == 0) {
347 if (!TAILQ_EMPTY(q)) {
349 * Only one stream can be here in old style
354 TAILQ_INSERT_TAIL(q, control, next_instrm);
355 control->on_strm_q = SCTP_ON_UNORDERED;
/* An unfragmented message carries both B and E bits: complete on arrival. */
361 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
362 control->end_added = 1;
363 control->first_frag_seen = 1;
364 control->last_frag_seen = 1;
366 if (TAILQ_EMPTY(q)) {
368 TAILQ_INSERT_HEAD(q, control, next_instrm);
370 control->on_strm_q = SCTP_ON_UNORDERED;
372 control->on_strm_q = SCTP_ON_ORDERED;
/* Non-empty queue: scan for the first entry with a larger MID. */
376 TAILQ_FOREACH(at, q, next_instrm) {
377 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
379 * one in queue is bigger than the new one,
380 * insert before this one
382 TAILQ_INSERT_BEFORE(at, control, next_instrm);
384 control->on_strm_q = SCTP_ON_UNORDERED;
386 control->on_strm_q = SCTP_ON_ORDERED;
389 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
391 * Gak, He sent me a duplicate msg id
392 * number?? return -1 to abort.
396 if (TAILQ_NEXT(at, next_instrm) == NULL) {
398 * We are at the end, insert it
401 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
402 sctp_log_strm_del(control, at,
403 SCTP_STR_LOG_FROM_INSERT_TL);
405 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
407 control->on_strm_q = SCTP_ON_UNORDERED;
409 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association because of a reassembly protocol violation.
 * Formats a diagnostic string (I-DATA vs legacy DATA format differs),
 * frees the offending chunk, records the abort location (`opspot`,
 * folded into last_abort_code), and sends a PROTOCOL_VIOLATION cause.
 * NOTE(review): the `oper` mbuf declaration, the `*abort_flag` store,
 * and chk->data = NULL / braces appear to be missing from this excerpt.
 */
420 sctp_abort_in_reasm(struct sctp_tcb *stcb,
421 struct sctp_queued_to_read *control,
422 struct sctp_tmit_chunk *chk,
423 int *abort_flag, int opspot)
425 char msg[SCTP_DIAG_INFO_LEN];
428 if (stcb->asoc.idata_supported) {
429 SCTP_SNPRINTF(msg, sizeof(msg),
430 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
432 control->fsn_included,
435 chk->rec.data.fsn, chk->rec.data.mid);
437 SCTP_SNPRINTF(msg, sizeof(msg),
438 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
440 control->fsn_included,
/* Legacy DATA: MID is really a 16-bit SSN, hence the narrowing cast. */
444 (uint16_t)chk->rec.data.mid);
446 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
447 sctp_m_freem(chk->data);
449 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
450 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
451 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Dispose of a control entry that could not be queued: free every
 * chunk on its reassembly list, drop the reference on the source
 * address, free any attached data mbuf chain, then free the control
 * itself.
 * NOTE(review): chk->data = NULL and closing braces appear to be
 * missing from this excerpt.
 */
456 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
459 * The control could not be placed and must be cleaned.
461 struct sctp_tmit_chunk *chk, *nchk;
/* SAFE variant: entries are removed while iterating. */
463 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
464 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
466 sctp_m_freem(chk->data);
468 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
470 sctp_free_remote_addr(control->whoFrom);
472 sctp_m_freem(control->data);
473 control->data = NULL;
475 sctp_free_a_readq(stcb, control);
/*
 * Deliver an ordered message: if its MID is the next expected for the
 * stream, push it straight onto the socket read queue and then drain
 * any now-in-order, complete successors from the stream queue.
 * Otherwise place it into the sorted stream queue via
 * sctp_place_control_in_stream(); a duplicate MID there aborts the
 * association (SCTP_LOC_3). A MID at/behind last_mid_delivered is a
 * protocol violation and also aborts (SCTP_LOC_2).
 * NOTE(review): declarations (`nxt_todel`, `op_err`), the
 * INVARIANTS/#else branches around the size accounting, braces and
 * returns appear to be missing from this excerpt.
 */
479 * Queue the chunk either right into the socket buffer if it is the next one
480 * to go OR put it in the correct place in the delivery queue. If we do
481 * append to the so_buf, keep doing so until we are out of order as
482 * long as the control's entered are non-fragmented.
485 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
486 struct sctp_association *asoc,
487 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
490 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
491 * all the data in one stream this could happen quite rapidly. One
492 * could use the TSN to keep track of things, but this scheme breaks
493 * down in the other type of stream usage that could occur. Send a
494 * single msg to stream 0, send 4Billion messages to stream 1, now
495 * send a message to stream 0. You have a situation where the TSN
496 * has wrapped but not in the stream. Is this worth worrying about
497 * or should we just change our queue sort at the bottom to be by
500 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
501 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
502 * assignment this could happen... and I don't see how this would be
503 * a violation. So for now I am undecided an will leave the sort by
504 * SSN alone. Maybe a hybred approach is the answer
507 struct sctp_queued_to_read *at;
511 struct sctp_stream_in *strm;
512 char msg[SCTP_DIAG_INFO_LEN];
514 strm = &asoc->strmin[control->sinfo_stream];
515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
516 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* A MID at or behind the last delivered one is a peer protocol error. */
518 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
519 /* The incoming sseq is behind where we last delivered? */
520 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
521 strm->last_mid_delivered, control->mid);
523 * throw it in the stream so it gets cleaned up in
524 * association destruction
526 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
527 if (asoc->idata_supported) {
528 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
529 strm->last_mid_delivered, control->sinfo_tsn,
530 control->sinfo_stream, control->mid);
532 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
533 (uint16_t)strm->last_mid_delivered,
535 control->sinfo_stream,
536 (uint16_t)control->mid);
538 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
539 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
540 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Provisionally charge the message to the stream-queue accounting. */
546 asoc->size_on_all_streams += control->length;
547 sctp_ucount_incr(asoc->cnt_on_all_streams);
548 nxt_todel = strm->last_mid_delivered + 1;
549 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
550 /* can be delivered right away? */
551 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
552 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
554 /* EY it wont be queued if it could be delivered directly */
/* Undo the provisional accounting; guard against underflow (panic under INVARIANTS). */
556 if (asoc->size_on_all_streams >= control->length) {
557 asoc->size_on_all_streams -= control->length;
560 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
562 asoc->size_on_all_streams = 0;
565 sctp_ucount_decr(asoc->cnt_on_all_streams);
566 strm->last_mid_delivered++;
567 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
568 sctp_add_to_readq(stcb->sctp_ep, stcb,
570 &stcb->sctp_socket->so_rcv, 1,
571 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain any queued successors that are now in order and complete. */
572 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
574 nxt_todel = strm->last_mid_delivered + 1;
575 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
576 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
577 if (control->on_strm_q == SCTP_ON_ORDERED) {
578 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
579 if (asoc->size_on_all_streams >= control->length) {
580 asoc->size_on_all_streams -= control->length;
583 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
585 asoc->size_on_all_streams = 0;
588 sctp_ucount_decr(asoc->cnt_on_all_streams);
591 panic("Huh control: %p is on_strm_q: %d",
592 control, control->on_strm_q);
595 control->on_strm_q = 0;
596 strm->last_mid_delivered++;
598 * We ignore the return of deliver_data here
599 * since we always can hold the chunk on the
600 * d-queue. And we have a finite number that
601 * can be delivered from the strq.
603 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
604 sctp_log_strm_del(control, NULL,
605 SCTP_STR_LOG_FROM_IMMED_DEL);
607 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
608 sctp_add_to_readq(stcb->sctp_ep, stcb,
610 &stcb->sctp_socket->so_rcv, 1,
611 SCTP_READ_LOCK_NOT_HELD,
614 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
622 * Ok, we did not deliver this guy, find the correct place
623 * to put it on the queue.
625 if (sctp_place_control_in_stream(strm, asoc, control)) {
/* Duplicate MID detected while inserting: clean up and abort. */
626 SCTP_SNPRINTF(msg, sizeof(msg),
627 "Queue to str MID: %u duplicate", control->mid);
628 sctp_clean_up_control(stcb, control);
629 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
630 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
631 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk the control's mbuf chain from the head: free zero-length
 * mbufs, accumulate the total length into control->length, charge the
 * socket buffer (sctp_sballoc) when the control is already on the
 * read queue, and leave control->tail_mbuf pointing at the last mbuf.
 * NOTE(review): loop header, length reset, and braces appear to be
 * missing from this excerpt.
 */
639 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
641 struct mbuf *m, *prev = NULL;
642 struct sctp_tcb *stcb;
644 stcb = control->stcb;
645 control->held_length = 0;
649 if (SCTP_BUF_LEN(m) == 0) {
650 /* Skip mbufs with NO length */
/* Empty mbuf at the head: new head is the freed mbuf's successor. */
653 control->data = sctp_m_free(m);
/* Empty mbuf mid-chain: splice it out through prev. */
656 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
657 m = SCTP_BUF_NEXT(prev);
660 control->tail_mbuf = prev;
665 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
666 if (control->on_read_q) {
668 * On read queue so we must increment the SB stuff,
669 * we assume caller has done any locks of SB.
671 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
673 m = SCTP_BUF_NEXT(m);
676 control->tail_mbuf = prev;
/*
 * Append mbuf chain `m` to the control's data: link it after
 * tail_mbuf (or adopt it as the whole chain when the control has no
 * tail yet), pruning zero-length mbufs, updating control->length and
 * `*added` with the bytes appended, and charging the socket buffer
 * when the control sits on the read queue. Leaves tail_mbuf at the
 * new last mbuf.
 * NOTE(review): the INVARIANTS null-stcb check context, loop headers
 * and braces appear to be missing from this excerpt.
 */
681 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
683 struct mbuf *prev = NULL;
684 struct sctp_tcb *stcb;
686 stcb = control->stcb;
689 panic("Control broken");
694 if (control->tail_mbuf == NULL) {
/* No existing chain: free any stale data and adopt `m` wholesale. */
696 sctp_m_freem(control->data);
698 sctp_setup_tail_pointer(control);
701 control->tail_mbuf->m_next = m;
703 if (SCTP_BUF_LEN(m) == 0) {
704 /* Skip mbufs with NO length */
/* Empty mbuf right after the old tail: unlink it there. */
707 control->tail_mbuf->m_next = sctp_m_free(m);
708 m = control->tail_mbuf->m_next;
/* Empty mbuf further along the appended chain: splice via prev. */
710 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
711 m = SCTP_BUF_NEXT(prev);
714 control->tail_mbuf = prev;
719 if (control->on_read_q) {
721 * On read queue so we must increment the SB stuff,
722 * we assume caller has done any locks of SB.
724 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
726 *added += SCTP_BUF_LEN(m);
727 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
728 m = SCTP_BUF_NEXT(m);
731 control->tail_mbuf = prev;
/*
 * Initialize `nc` as a fresh read-queue entry cloned from `control`:
 * zeroed, then stream/MID/flags/ppid/context/tsn/cumtsn/assoc-id and
 * peer info copied over, reasm list empty, fsn_included reset to the
 * 0xffffffff "nothing included" sentinel, and a new reference taken
 * on the shared whoFrom net. Used when splitting leftover fragments
 * onto a new control (see sctp_handle_old_unordered_data).
 * Note: nc->mid is assigned twice (lines 740 and 743) — harmless
 * duplication in the original source.
 */
736 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
738 memset(nc, 0, sizeof(struct sctp_queued_to_read));
739 nc->sinfo_stream = control->sinfo_stream;
740 nc->mid = control->mid;
741 TAILQ_INIT(&nc->reasm);
742 nc->top_fsn = control->top_fsn;
743 nc->mid = control->mid;
744 nc->sinfo_flags = control->sinfo_flags;
745 nc->sinfo_ppid = control->sinfo_ppid;
746 nc->sinfo_context = control->sinfo_context;
747 nc->fsn_included = 0xffffffff;
748 nc->sinfo_tsn = control->sinfo_tsn;
749 nc->sinfo_cumtsn = control->sinfo_cumtsn;
750 nc->sinfo_assoc_id = control->sinfo_assoc_id;
751 nc->whoFrom = control->whoFrom;
752 atomic_add_int(&nc->whoFrom->ref_count, 1);
753 nc->stcb = control->stcb;
754 nc->port_from = control->port_from;
755 nc->do_not_ref_stcb = control->do_not_ref_stcb;
/*
 * Reset a control entry's included-FSN to `tsn` and, if it is sitting
 * on the endpoint's read queue, remove it from there and clear the
 * on_read_q flag.
 * NOTE(review): braces and any socket-buffer accounting between the
 * visible lines appear to be missing from this excerpt.
 */
759 sctp_reset_a_control(struct sctp_queued_to_read *control,
760 struct sctp_inpcb *inp, uint32_t tsn)
762 control->fsn_included = tsn;
763 if (control->on_read_q) {
765 * We have to purge it from there, hopefully this will work
768 TAILQ_REMOVE(&inp->read_queue, control, next);
769 control->on_read_q = 0;
/*
 * Legacy (pre-I-DATA) unordered reassembly: all fragments share MID 0
 * on one control. Once the first fragment has been seen, repeatedly
 * absorb the next in-sequence FSN from the reasm list into the
 * control (sctp_add_chk_to_control). When the message completes
 * (end_added) and fragments of a *following* message remain queued,
 * split them onto a freshly allocated control `nc` (seeded via
 * sctp_build_readq_entry_from_ctl, with a FIRST_FRAG chunk becoming
 * nc's data), queue `nc` on the unordered stream queue, and hand the
 * finished control to the socket read queue. If the message is still
 * incomplete but exceeds the partial-delivery point, start PD-API
 * delivery instead. Readers are woken after progress.
 * NOTE(review): this function is the most heavily truncated in the
 * excerpt — the `pd_point` parameter line, `fsn`/`cnt_added`
 * declarations, loop/brace structure, and several returns are
 * missing; comments here describe only what the visible lines show.
 */
774 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
775 struct sctp_association *asoc,
776 struct sctp_stream_in *strm,
777 struct sctp_queued_to_read *control,
779 int inp_read_lock_held)
782 * Special handling for the old un-ordered data chunk. All the
783 * chunks/TSN's go to mid 0. So we have to do the old style watching
784 * to see if we have it all. If you return one, no other control
785 * entries on the un-ordered queue will be looked at. In theory
786 * there should be no others entries in reality, unless the guy is
787 * sending both unordered NDATA and unordered DATA...
789 struct sctp_tmit_chunk *chk, *lchk, *tchk;
791 struct sctp_queued_to_read *nc;
794 if (control->first_frag_seen == 0) {
795 /* Nothing we can do, we have not seen the first piece yet */
798 /* Collapse any we can */
/* Next FSN we can absorb is one past what the control already includes. */
801 fsn = control->fsn_included + 1;
802 /* Now what can we add? */
803 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
804 if (chk->rec.data.fsn == fsn) {
806 sctp_alloc_a_readq(stcb, nc);
810 memset(nc, 0, sizeof(struct sctp_queued_to_read));
811 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
812 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
816 if (control->end_added) {
818 if (!TAILQ_EMPTY(&control->reasm)) {
820 * Ok we have to move anything left
821 * on the control queue to a new
824 sctp_build_readq_entry_from_ctl(nc, control);
825 tchk = TAILQ_FIRST(&control->reasm);
826 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* Leftover FIRST_FRAG seeds the new control's data/FSN/TSN/PPID. */
827 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
828 if (asoc->size_on_reasm_queue >= tchk->send_size) {
829 asoc->size_on_reasm_queue -= tchk->send_size;
832 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
834 asoc->size_on_reasm_queue = 0;
837 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
838 nc->first_frag_seen = 1;
839 nc->fsn_included = tchk->rec.data.fsn;
840 nc->data = tchk->data;
841 nc->sinfo_ppid = tchk->rec.data.ppid;
842 nc->sinfo_tsn = tchk->rec.data.tsn;
843 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
845 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
846 sctp_setup_tail_pointer(nc);
847 tchk = TAILQ_FIRST(&control->reasm);
849 /* Spin the rest onto the queue */
851 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
852 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
853 tchk = TAILQ_FIRST(&control->reasm);
856 * Now lets add it to the queue
857 * after removing control
859 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
860 nc->on_strm_q = SCTP_ON_UNORDERED;
861 if (control->on_strm_q) {
862 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
863 control->on_strm_q = 0;
/* Completed message ends any in-progress partial delivery on it. */
866 if (control->pdapi_started) {
867 strm->pd_api_started = 0;
868 control->pdapi_started = 0;
870 if (control->on_strm_q) {
871 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
872 control->on_strm_q = 0;
873 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
875 if (control->on_read_q == 0) {
876 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
877 &stcb->sctp_socket->so_rcv, control->end_added,
878 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
880 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
881 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
883 * Switch to the new guy and
/* nc unused on this path: release it back to the zone. */
889 if (nc->on_strm_q == 0) {
890 sctp_free_a_readq(stcb, nc);
895 sctp_free_a_readq(stcb, nc);
/* Progress was made on a PD-API message: wake the reader. */
902 if (cnt_added && strm->pd_api_started) {
903 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/* Incomplete but past the PD point: begin partial delivery. */
905 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
906 strm->pd_api_started = 1;
907 control->pdapi_started = 1;
908 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
909 &stcb->sctp_socket->so_rcv, control->end_added,
910 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
911 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Place a legacy unordered fragment into a control's reassembly state.
 * FIRST_FRAG handling: the chunk may become the control's data; if a
 * first was already seen, either it starts a later message (queue it),
 * it collides with the included FSN or an active PD-API (abort,
 * SCTP_LOC_4), or it is an *earlier* first than the one held — in
 * which case data/FSN/TSN/PPID are swapped between control and chunk
 * so the smaller one becomes the control's payload. Non-first
 * fragments are inserted into the reasm list sorted by FSN; a
 * duplicate FSN aborts (SCTP_LOC_5); if nothing was inserted the
 * chunk goes to the tail and becomes the new top_fsn.
 * NOTE(review): the trailing parameter(s), declarations of
 * `inserted`/`tmp`/`tdata`, the chk->data/send_size swap lines, and
 * several braces/returns appear to be missing from this excerpt.
 */
919 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
920 struct sctp_association *asoc,
921 struct sctp_queued_to_read *control,
922 struct sctp_tmit_chunk *chk,
925 struct sctp_tmit_chunk *at;
929 * Here we need to place the chunk into the control structure sorted
930 * in the correct order.
932 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
933 /* Its the very first one. */
934 SCTPDBG(SCTP_DEBUG_XXX,
935 "chunk is a first fsn: %u becomes fsn_included\n",
937 at = TAILQ_FIRST(&control->reasm);
938 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
940 * The first chunk in the reassembly is a smaller
941 * TSN than this one, even though this has a first,
942 * it must be from a subsequent msg.
946 if (control->first_frag_seen) {
948 * In old un-ordered we can reassembly on one
949 * control multiple messages. As long as the next
950 * FIRST is greater then the old first (TSN i.e. FSN
956 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
958 * Easy way the start of a new guy beyond
963 if ((chk->rec.data.fsn == control->fsn_included) ||
964 (control->pdapi_started)) {
966 * Ok this should not happen, if it does we
967 * started the pd-api on the higher TSN
968 * (since the equals part is a TSN failure
971 * We are completly hosed in that case since
972 * I have no way to recover. This really
973 * will only happen if we can get more TSN's
974 * higher before the pd-api-point.
976 sctp_abort_in_reasm(stcb, control, chk,
978 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
983 * Ok we have two firsts and the one we just got is
984 * smaller than the one we previously placed.. yuck!
985 * We must swap them out.
/* Swap payload, length, FSN, TSN and PPID between control and chunk. */
988 tdata = control->data;
989 control->data = chk->data;
991 /* Save the lengths */
992 chk->send_size = control->length;
993 /* Recompute length of control and tail pointer */
994 sctp_setup_tail_pointer(control);
995 /* Fix the FSN included */
996 tmp = control->fsn_included;
997 control->fsn_included = chk->rec.data.fsn;
998 chk->rec.data.fsn = tmp;
999 /* Fix the TSN included */
1000 tmp = control->sinfo_tsn;
1001 control->sinfo_tsn = chk->rec.data.tsn;
1002 chk->rec.data.tsn = tmp;
1003 /* Fix the PPID included */
1004 tmp = control->sinfo_ppid;
1005 control->sinfo_ppid = chk->rec.data.ppid;
1006 chk->rec.data.ppid = tmp;
1007 /* Fix tail pointer */
/* First FIRST_FRAG seen: the chunk's payload becomes the control's data. */
1010 control->first_frag_seen = 1;
1011 control->fsn_included = chk->rec.data.fsn;
1012 control->top_fsn = chk->rec.data.fsn;
1013 control->sinfo_tsn = chk->rec.data.tsn;
1014 control->sinfo_ppid = chk->rec.data.ppid;
1015 control->data = chk->data;
1016 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1018 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1019 sctp_setup_tail_pointer(control);
/* Non-first fragment: insert into the reasm list sorted by FSN. */
1024 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1025 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1027 * This one in queue is bigger than the new one,
1028 * insert the new one before at.
1030 asoc->size_on_reasm_queue += chk->send_size;
1031 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1033 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1035 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1037 * They sent a duplicate fsn number. This really
1038 * should not happen since the FSN is a TSN and it
1039 * should have been dropped earlier.
1041 sctp_abort_in_reasm(stcb, control, chk,
1043 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1048 if (inserted == 0) {
1049 /* Its at the end */
1050 asoc->size_on_reasm_queue += chk->send_size;
1051 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1052 control->top_fsn = chk->rec.data.fsn;
1053 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1058 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1059 struct sctp_stream_in *strm, int inp_read_lock_held)
1062 * Given a stream, strm, see if any of the SSN's on it that are
1063 * fragmented are ready to deliver. If so go ahead and place them on
1064 * the read queue. In so placing if we have hit the end, then we
1065 * need to remove them from the stream's queue.
1067 struct sctp_queued_to_read *control, *nctl = NULL;
1068 uint32_t next_to_del;
1072 if (stcb->sctp_socket) {
1073 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1074 stcb->sctp_ep->partial_delivery_point);
1076 pd_point = stcb->sctp_ep->partial_delivery_point;
1078 control = TAILQ_FIRST(&strm->uno_inqueue);
1080 if ((control != NULL) &&
1081 (asoc->idata_supported == 0)) {
1082 /* Special handling needed for "old" data format */
1083 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1087 if (strm->pd_api_started) {
1088 /* Can't add more */
1092 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1093 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1094 nctl = TAILQ_NEXT(control, next_instrm);
1095 if (control->end_added) {
1096 /* We just put the last bit on */
1097 if (control->on_strm_q) {
1099 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1100 panic("Huh control: %p on_q: %d -- not unordered?",
1101 control, control->on_strm_q);
1104 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1105 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1106 if (asoc->size_on_all_streams >= control->length) {
1107 asoc->size_on_all_streams -= control->length;
1110 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1112 asoc->size_on_all_streams = 0;
1115 sctp_ucount_decr(asoc->cnt_on_all_streams);
1116 control->on_strm_q = 0;
1118 if (control->on_read_q == 0) {
1119 sctp_add_to_readq(stcb->sctp_ep, stcb,
1121 &stcb->sctp_socket->so_rcv, control->end_added,
1122 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1125 /* Can we do a PD-API for this un-ordered guy? */
1126 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1127 strm->pd_api_started = 1;
1128 control->pdapi_started = 1;
1129 sctp_add_to_readq(stcb->sctp_ep, stcb,
1131 &stcb->sctp_socket->so_rcv, control->end_added,
1132 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1140 control = TAILQ_FIRST(&strm->inqueue);
1141 if (strm->pd_api_started) {
1142 /* Can't add more */
1145 if (control == NULL) {
1148 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1150 * Ok the guy at the top was being partially delivered
1151 * completed, so we remove it. Note the pd_api flag was
1152 * taken off when the chunk was merged on in
1153 * sctp_queue_data_for_reasm below.
1155 nctl = TAILQ_NEXT(control, next_instrm);
1156 SCTPDBG(SCTP_DEBUG_XXX,
1157 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1158 control, control->end_added, control->mid,
1159 control->top_fsn, control->fsn_included,
1160 strm->last_mid_delivered);
1161 if (control->end_added) {
1162 if (control->on_strm_q) {
1164 if (control->on_strm_q != SCTP_ON_ORDERED) {
1165 panic("Huh control: %p on_q: %d -- not ordered?",
1166 control, control->on_strm_q);
1169 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1170 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1171 if (asoc->size_on_all_streams >= control->length) {
1172 asoc->size_on_all_streams -= control->length;
1175 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1177 asoc->size_on_all_streams = 0;
1180 sctp_ucount_decr(asoc->cnt_on_all_streams);
1181 control->on_strm_q = 0;
1183 if (strm->pd_api_started && control->pdapi_started) {
1184 control->pdapi_started = 0;
1185 strm->pd_api_started = 0;
1187 if (control->on_read_q == 0) {
1188 sctp_add_to_readq(stcb->sctp_ep, stcb,
1190 &stcb->sctp_socket->so_rcv, control->end_added,
1191 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1196 if (strm->pd_api_started) {
1198 * Can't add more must have gotten an un-ordered above being
1199 * partially delivered.
1204 next_to_del = strm->last_mid_delivered + 1;
1206 SCTPDBG(SCTP_DEBUG_XXX,
1207 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1208 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1210 nctl = TAILQ_NEXT(control, next_instrm);
1211 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1212 (control->first_frag_seen)) {
1215 /* Ok we can deliver it onto the stream. */
1216 if (control->end_added) {
1217 /* We are done with it afterwards */
1218 if (control->on_strm_q) {
1220 if (control->on_strm_q != SCTP_ON_ORDERED) {
1221 panic("Huh control: %p on_q: %d -- not ordered?",
1222 control, control->on_strm_q);
1225 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1226 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1227 if (asoc->size_on_all_streams >= control->length) {
1228 asoc->size_on_all_streams -= control->length;
1231 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1233 asoc->size_on_all_streams = 0;
1236 sctp_ucount_decr(asoc->cnt_on_all_streams);
1237 control->on_strm_q = 0;
1241 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1243 * A singleton now slipping through - mark
1244 * it non-revokable too
1246 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1247 } else if (control->end_added == 0) {
1249 * Check if we can defer adding until its
1252 if ((control->length < pd_point) || (strm->pd_api_started)) {
1254 * Don't need it or cannot add more
1255 * (one being delivered that way)
1260 done = (control->end_added) && (control->last_frag_seen);
1261 if (control->on_read_q == 0) {
1263 if (asoc->size_on_all_streams >= control->length) {
1264 asoc->size_on_all_streams -= control->length;
1267 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1269 asoc->size_on_all_streams = 0;
1272 strm->pd_api_started = 1;
1273 control->pdapi_started = 1;
1275 sctp_add_to_readq(stcb->sctp_ep, stcb,
1277 &stcb->sctp_socket->so_rcv, control->end_added,
1278 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1280 strm->last_mid_delivered = next_to_del;
/*
 * Merge the payload of one received fragment (chk) onto the message being
 * reassembled (control) and release the chunk's bookkeeping.
 *
 * NOTE(review): this listing is elided -- the return-type line, braces and
 * some statements between the numbered lines are not shown, so comments
 * below describe only what the visible lines establish.
 */
1293 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1294 struct sctp_stream_in *strm,
1295 struct sctp_tcb *stcb, struct sctp_association *asoc,
1296 struct sctp_tmit_chunk *chk, int hold_rlock)
1299 * Given a control and a chunk, merge the data from the chk onto the
1300 * control and free up the chunk resources.
/*
 * If the control is already visible on the read queue (a partial-delivery
 * is in progress) and the caller does not hold the endpoint read lock,
 * take it ourselves before touching the mbuf chain.
 */
1305 if (control->on_read_q && (hold_rlock == 0)) {
1307 * Its being pd-api'd so we must do some locks.
1309 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First data for this control, or append to the existing tail. */
1312 if (control->data == NULL) {
1313 control->data = chk->data;
1314 sctp_setup_tail_pointer(control);
1316 sctp_add_to_tail_pointer(control, chk->data, &added);
/* The fragment now lives in the control: advance the highest FSN merged
 * in, and remove the chunk from the reassembly-queue accounting. */
1318 control->fsn_included = chk->rec.data.fsn;
1319 asoc->size_on_reasm_queue -= chk->send_size;
1320 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1321 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* A FIRST fragment supplies the message's TSN/PPID meta-data. */
1323 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1324 control->first_frag_seen = 1;
1325 control->sinfo_tsn = chk->rec.data.tsn;
1326 control->sinfo_ppid = chk->rec.data.ppid;
/* A LAST fragment completes the message: clear any partial-delivery
 * state and unhook the control from the stream-in queue it sits on. */
1328 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1330 if ((control->on_strm_q) && (control->on_read_q)) {
1331 if (control->pdapi_started) {
1332 control->pdapi_started = 0;
1333 strm->pd_api_started = 0;
1335 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1337 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1338 control->on_strm_q = 0;
1339 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1341 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1343 * Don't need to decrement
1344 * size_on_all_streams, since control is on
1347 sctp_ucount_decr(asoc->cnt_on_all_streams);
1348 control->on_strm_q = 0;
/* Any other on_strm_q value indicates corrupted state -- fatal. */
1350 } else if (control->on_strm_q) {
1351 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1352 control->on_strm_q);
1356 control->end_added = 1;
1357 control->last_frag_seen = 1;
/* Presumably guarded by the same condition as the LOCK above (guard line
 * elided in this listing) -- drop the read lock if we took it. */
1360 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* The chunk's data mbufs were donated to the control; return the chunk
 * descriptor itself to the free list. */
1362 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1367 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1368 * queue, see if anything can be delivered. If so, pull it off (or as much as
1369 * we can). If we run out of space, then we must dump what we can and set the
1370 * appropriate flag to say we queued what we could.
/*
 * Insert fragment 'chk' into the reassembly state for 'control' (placing
 * the control on the stream-in queue first if it was just created), then
 * pull any now-in-sequence fragments into the control and wake the reader.
 *
 * NOTE(review): this listing is elided -- return type, braces, some else
 * branches and declarations (e.g. 'inserted', 'next_fsn', 'lenadded') are
 * not shown; comments describe only what the visible lines establish.
 */
1373 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1374 struct sctp_queued_to_read *control,
1375 struct sctp_tmit_chunk *chk,
1376 int created_control,
1377 int *abort_flag, uint32_t tsn)
1380 struct sctp_tmit_chunk *at, *nat;
1381 struct sctp_stream_in *strm;
1382 int do_wakeup, unordered;
/* Resolve the stream this message belongs to. */
1385 strm = &asoc->strmin[control->sinfo_stream];
1387 * For old un-ordered data chunks.
/* The U (unordered) bit for old DATA lives in the upper byte of
 * sinfo_flags. */
1389 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1394 /* Must be added to the stream-in queue */
1395 if (created_control) {
1396 if ((unordered == 0) || (asoc->idata_supported)) {
1397 sctp_ucount_incr(asoc->cnt_on_all_streams);
/* A non-zero return means the SSN/MID already exists in the stream:
 * abort the association and discard the freshly built control. */
1399 if (sctp_place_control_in_stream(strm, asoc, control)) {
1400 /* Duplicate SSN? */
1401 sctp_abort_in_reasm(stcb, control, chk,
1403 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1404 sctp_clean_up_control(stcb, control);
/* Old DATA only: a fragment filling the TSN right above the cum-ack
 * must carry the B (first) bit, otherwise the sequence is broken. */
1407 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1409 * Ok we created this control and now lets validate
1410 * that its legal i.e. there is a B bit set, if not
1411 * and we have up to the cum-ack then its invalid.
1413 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1414 sctp_abort_in_reasm(stcb, control, chk,
1416 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old-format unordered fragments take an entirely separate path. */
1421 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1422 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1426 * Ok we must queue the chunk into the reasembly portion: o if its
1427 * the first it goes to the control mbuf. o if its not first but the
1428 * next in sequence it goes to the control, and each succeeding one
1429 * in order also goes. o if its not in order we place it on the list
/* --- FIRST fragment: it seeds the control's data chain directly. --- */
1432 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1433 /* Its the very first one. */
1434 SCTPDBG(SCTP_DEBUG_XXX,
1435 "chunk is a first fsn: %u becomes fsn_included\n",
/* Two FIRST fragments for the same message is a sender error. */
1437 if (control->first_frag_seen) {
1439 * Error on senders part, they either sent us two
1440 * data chunks with FIRST, or they sent two
1441 * un-ordered chunks that were fragmented at the
1442 * same time in the same stream.
1444 sctp_abort_in_reasm(stcb, control, chk,
1446 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* Take over the chunk's meta-data and mbuf chain, then free the
 * chunk descriptor. */
1449 control->first_frag_seen = 1;
1450 control->sinfo_ppid = chk->rec.data.ppid;
1451 control->sinfo_tsn = chk->rec.data.tsn;
1452 control->fsn_included = chk->rec.data.fsn;
1453 control->data = chk->data;
1454 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1456 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1457 sctp_setup_tail_pointer(control);
1458 asoc->size_on_all_streams += control->length;
/* --- MIDDLE/LAST fragment: validate, then insert into the sorted
 * reassembly list. --- */
1460 /* Place the chunk in our list */
1463 if (control->last_frag_seen == 0) {
1464 /* Still willing to raise highest FSN seen */
1465 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1466 SCTPDBG(SCTP_DEBUG_XXX,
1467 "We have a new top_fsn: %u\n",
1469 control->top_fsn = chk->rec.data.fsn;
/* The LAST fragment fixes top_fsn; anything already above it is bogus. */
1471 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1472 SCTPDBG(SCTP_DEBUG_XXX,
1473 "The last fsn is now in place fsn: %u\n",
1475 control->last_frag_seen = 1;
1476 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1477 SCTPDBG(SCTP_DEBUG_XXX,
1478 "New fsn: %u is not at top_fsn: %u -- abort\n",
1481 sctp_abort_in_reasm(stcb, control, chk,
1483 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1487 if (asoc->idata_supported || control->first_frag_seen) {
1489 * For IDATA we always check since we know
1490 * that the first fragment is 0. For old
1491 * DATA we have to receive the first before
1492 * we know the first FSN (which is the TSN).
/* Fragment at or below what was already delivered: protocol violation. */
1494 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1496 * We have already delivered up to
1499 sctp_abort_in_reasm(stcb, control, chk,
1501 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
/* Presumably this branch handles the last_frag_seen == 1 case (the
 * else line is elided): a second LAST fragment is invalid. */
1506 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1507 /* Second last? huh? */
1508 SCTPDBG(SCTP_DEBUG_XXX,
1509 "Duplicate last fsn: %u (top: %u) -- abort\n",
1510 chk->rec.data.fsn, control->top_fsn);
1511 sctp_abort_in_reasm(stcb, control,
1513 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1516 if (asoc->idata_supported || control->first_frag_seen) {
1518 * For IDATA we always check since we know
1519 * that the first fragment is 0. For old
1520 * DATA we have to receive the first before
1521 * we know the first FSN (which is the TSN).
1524 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1526 * We have already delivered up to
1529 SCTPDBG(SCTP_DEBUG_XXX,
1530 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1531 chk->rec.data.fsn, control->fsn_included);
1532 sctp_abort_in_reasm(stcb, control, chk,
1534 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1539 * validate not beyond top FSN if we have seen last
1542 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1543 SCTPDBG(SCTP_DEBUG_XXX,
1544 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1547 sctp_abort_in_reasm(stcb, control, chk,
1549 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1554 * If we reach here, we need to place the new chunk in the
1555 * reassembly for this control.
1557 SCTPDBG(SCTP_DEBUG_XXX,
1558 "chunk is a not first fsn: %u needs to be inserted\n",
/* Walk the FSN-sorted reassembly list and insert before the first
 * entry with a higher FSN; equal FSN means a duplicate. */
1560 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1561 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1562 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1563 /* Last not at the end? huh? */
1564 SCTPDBG(SCTP_DEBUG_XXX,
1565 "Last fragment not last in list: -- abort\n");
1566 sctp_abort_in_reasm(stcb, control,
1568 SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1572 * This one in queue is bigger than the new
1573 * one, insert the new one before at.
1575 SCTPDBG(SCTP_DEBUG_XXX,
1576 "Insert it before fsn: %u\n",
1578 asoc->size_on_reasm_queue += chk->send_size;
1579 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1580 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1583 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1585 * Gak, He sent me a duplicate str seq
1589 * foo bar, I guess I will just free this
1590 * new guy, should we abort too? FIX ME
1591 * MAYBE? Or it COULD be that the SSN's have
1592 * wrapped. Maybe I should compare to TSN
1593 * somehow... sigh for now just blow away
1596 SCTPDBG(SCTP_DEBUG_XXX,
1597 "Duplicate to fsn: %u -- abort\n",
1599 sctp_abort_in_reasm(stcb, control,
1601 SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
/* No larger entry found: the fragment goes at the tail. */
1605 if (inserted == 0) {
1606 /* Goes on the end */
1607 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1609 asoc->size_on_reasm_queue += chk->send_size;
1610 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1611 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1615 * Ok lets see if we can suck any up into the control structure that
1616 * are in seq if it makes sense.
1620 * If the first fragment has not been seen there is no sense in
/* Drain every fragment whose FSN is exactly fsn_included + 1 into the
 * control, updating the stream/read-queue accounting as we go. */
1623 if (control->first_frag_seen) {
1624 next_fsn = control->fsn_included + 1;
1625 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1626 if (at->rec.data.fsn == next_fsn) {
1627 /* We can add this one now to the control */
1628 SCTPDBG(SCTP_DEBUG_XXX,
1629 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1632 next_fsn, control->fsn_included);
1633 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1634 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1635 if (control->on_read_q) {
1639 * We only add to the
1640 * size-on-all-streams if its not on
1641 * the read q. The read q flag will
1642 * cause a sballoc so its accounted
1645 asoc->size_on_all_streams += lenadded;
/* Message complete while a partial delivery was running: end the
 * PD-API and hand the finished message to the read queue. */
1648 if (control->end_added && control->pdapi_started) {
1649 if (strm->pd_api_started) {
1650 strm->pd_api_started = 0;
1651 control->pdapi_started = 0;
1653 if (control->on_read_q == 0) {
1654 sctp_add_to_readq(stcb->sctp_ep, stcb,
1656 &stcb->sctp_socket->so_rcv, control->end_added,
1657 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1667 /* Need to wakeup the reader */
1668 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Locate the queued-to-read entry currently being reassembled for message
 * id 'mid' on this stream, or NULL-equivalent if none matches.
 *
 * NOTE(review): the listing is elided here -- the 'ordered' parameter
 * presumably selects between the two queues via an if/else whose lines
 * are not shown; confirm against the full source.
 */
1672 static struct sctp_queued_to_read *
1673 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1675 struct sctp_queued_to_read *control;
/* Ordered messages: scan the stream's in-order queue for a MID match
 * (SCTP_MID_EQ compares 32-bit MIDs for I-DATA, 16-bit SSNs otherwise). */
1678 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1679 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Unordered messages: with I-DATA each carries its own MID, so the
 * unordered queue can be searched the same way. */
1684 if (idata_supported) {
1685 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1686 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Old DATA has no MID on unordered fragments; only the head of the
 * unordered queue can be the one under reassembly. */
1691 control = TAILQ_FIRST(&strm->uno_inqueue);
1698 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1699 struct mbuf **m, int offset, int chk_length,
1700 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1701 int *break_flag, int last_chunk, uint8_t chk_type)
1703 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1704 struct sctp_stream_in *strm;
1705 uint32_t tsn, fsn, gap, mid;
1708 int need_reasm_check = 0;
1710 struct mbuf *op_err;
1711 char msg[SCTP_DIAG_INFO_LEN];
1712 struct sctp_queued_to_read *control, *ncontrol;
1715 struct sctp_stream_reset_list *liste;
1718 int created_control = 0;
1720 if (chk_type == SCTP_IDATA) {
1721 struct sctp_idata_chunk *chunk, chunk_buf;
1723 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1724 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1725 chk_flags = chunk->ch.chunk_flags;
1726 clen = sizeof(struct sctp_idata_chunk);
1727 tsn = ntohl(chunk->dp.tsn);
1728 sid = ntohs(chunk->dp.sid);
1729 mid = ntohl(chunk->dp.mid);
1730 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1732 ppid = chunk->dp.ppid_fsn.ppid;
1734 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1735 ppid = 0xffffffff; /* Use as an invalid value. */
1738 struct sctp_data_chunk *chunk, chunk_buf;
1740 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1741 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1742 chk_flags = chunk->ch.chunk_flags;
1743 clen = sizeof(struct sctp_data_chunk);
1744 tsn = ntohl(chunk->dp.tsn);
1745 sid = ntohs(chunk->dp.sid);
1746 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1748 ppid = chunk->dp.ppid;
1750 if ((size_t)chk_length == clen) {
1752 * Need to send an abort since we had a empty data chunk.
1754 op_err = sctp_generate_no_user_data_cause(tsn);
1755 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1756 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1760 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1761 asoc->send_sack = 1;
1763 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1765 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1770 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1771 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1772 /* It is a duplicate */
1773 SCTP_STAT_INCR(sctps_recvdupdata);
1774 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1775 /* Record a dup for the next outbound sack */
1776 asoc->dup_tsns[asoc->numduptsns] = tsn;
1779 asoc->send_sack = 1;
1782 /* Calculate the number of TSN's between the base and this TSN */
1783 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1784 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1785 /* Can't hold the bit in the mapping at max array, toss it */
1788 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1789 SCTP_TCB_LOCK_ASSERT(stcb);
1790 if (sctp_expand_mapping_array(asoc, gap)) {
1791 /* Can't expand, drop it */
1795 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1798 /* See if we have received this one already */
1799 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1800 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1801 SCTP_STAT_INCR(sctps_recvdupdata);
1802 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1803 /* Record a dup for the next outbound sack */
1804 asoc->dup_tsns[asoc->numduptsns] = tsn;
1807 asoc->send_sack = 1;
1811 * Check to see about the GONE flag, duplicates would cause a sack
1812 * to be sent up above
1814 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1815 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1816 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1818 * wait a minute, this guy is gone, there is no longer a
1819 * receiver. Send peer an ABORT!
1821 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1822 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1827 * Now before going further we see if there is room. If NOT then we
1828 * MAY let one through only IF this TSN is the one we are waiting
1829 * for on a partial delivery API.
1832 /* Is the stream valid? */
1833 if (sid >= asoc->streamincnt) {
1834 struct sctp_error_invalid_stream *cause;
1836 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1837 0, M_NOWAIT, 1, MT_DATA);
1838 if (op_err != NULL) {
1839 /* add some space up front so prepend will work well */
1840 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1841 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1843 * Error causes are just param's and this one has
1844 * two back to back phdr, one with the error type
1845 * and size, the other with the streamid and a rsvd
1847 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1848 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1849 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1850 cause->stream_id = htons(sid);
1851 cause->reserved = htons(0);
1852 sctp_queue_op_err(stcb, op_err);
1854 SCTP_STAT_INCR(sctps_badsid);
1855 SCTP_TCB_LOCK_ASSERT(stcb);
1856 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1857 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1858 asoc->highest_tsn_inside_nr_map = tsn;
1860 if (tsn == (asoc->cumulative_tsn + 1)) {
1861 /* Update cum-ack */
1862 asoc->cumulative_tsn = tsn;
1867 * If its a fragmented message, lets see if we can find the control
1868 * on the reassembly queues.
1870 if ((chk_type == SCTP_IDATA) &&
1871 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1874 * The first *must* be fsn 0, and other (middle/end) pieces
1875 * can *not* be fsn 0. XXX: This can happen in case of a
1876 * wrap around. Ignore is for now.
1878 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1881 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1882 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1883 chk_flags, control);
1884 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1885 /* See if we can find the re-assembly entity */
1886 if (control != NULL) {
1887 /* We found something, does it belong? */
1888 if (ordered && (mid != control->mid)) {
1889 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1891 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1892 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1893 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1897 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1899 * We can't have a switched order with an
1902 SCTP_SNPRINTF(msg, sizeof(msg),
1903 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1907 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1909 * We can't have a switched unordered with a
1912 SCTP_SNPRINTF(msg, sizeof(msg),
1913 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1920 * Its a complete segment. Lets validate we don't have a
1921 * re-assembly going on with the same Stream/Seq (for
1922 * ordered) or in the same Stream for unordered.
1924 if (control != NULL) {
1925 if (ordered || asoc->idata_supported) {
1926 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1928 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1931 if ((tsn == control->fsn_included + 1) &&
1932 (control->end_added == 0)) {
1933 SCTP_SNPRINTF(msg, sizeof(msg),
1934 "Illegal message sequence, missing end for MID: %8.8x",
1935 control->fsn_included);
1943 /* now do the tests */
1944 if (((asoc->cnt_on_all_streams +
1945 asoc->cnt_on_reasm_queue +
1946 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1947 (((int)asoc->my_rwnd) <= 0)) {
1949 * When we have NO room in the rwnd we check to make sure
1950 * the reader is doing its job...
1952 if (stcb->sctp_socket->so_rcv.sb_cc) {
1953 /* some to read, wake-up */
1954 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1956 /* now is it in the mapping array of what we have accepted? */
1957 if (chk_type == SCTP_DATA) {
1958 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1959 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1960 /* Nope not in the valid range dump it */
1962 sctp_set_rwnd(stcb, asoc);
1963 if ((asoc->cnt_on_all_streams +
1964 asoc->cnt_on_reasm_queue +
1965 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1966 SCTP_STAT_INCR(sctps_datadropchklmt);
1968 SCTP_STAT_INCR(sctps_datadroprwnd);
1974 if (control == NULL) {
1977 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1982 #ifdef SCTP_ASOCLOG_OF_TSNS
1983 SCTP_TCB_LOCK_ASSERT(stcb);
1984 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1985 asoc->tsn_in_at = 0;
1986 asoc->tsn_in_wrapped = 1;
1988 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1989 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1990 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1991 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1992 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1993 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1994 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1995 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1999 * Before we continue lets validate that we are not being fooled by
2000 * an evil attacker. We can only have Nk chunks based on our TSN
2001 * spread allowed by the mapping array N * 8 bits, so there is no
2002 * way our stream sequence numbers could have wrapped. We of course
2003 * only validate the FIRST fragment so the bit must be set.
2005 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2006 (TAILQ_EMPTY(&asoc->resetHead)) &&
2007 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2008 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2009 /* The incoming sseq is behind where we last delivered? */
2010 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2011 mid, asoc->strmin[sid].last_mid_delivered);
2013 if (asoc->idata_supported) {
2014 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2015 asoc->strmin[sid].last_mid_delivered,
2020 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2021 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2026 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2027 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2028 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2032 if (chk_type == SCTP_IDATA) {
2033 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2035 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2037 if (last_chunk == 0) {
2038 if (chk_type == SCTP_IDATA) {
2039 dmbuf = SCTP_M_COPYM(*m,
2040 (offset + sizeof(struct sctp_idata_chunk)),
2043 dmbuf = SCTP_M_COPYM(*m,
2044 (offset + sizeof(struct sctp_data_chunk)),
2047 #ifdef SCTP_MBUF_LOGGING
2048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2049 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2053 /* We can steal the last chunk */
2057 /* lop off the top part */
2058 if (chk_type == SCTP_IDATA) {
2059 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2061 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2063 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2064 l_len = SCTP_BUF_LEN(dmbuf);
2067 * need to count up the size hopefully does not hit
2073 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2074 l_len += SCTP_BUF_LEN(lat);
2077 if (l_len > the_len) {
2078 /* Trim the end round bytes off too */
2079 m_adj(dmbuf, -(l_len - the_len));
2082 if (dmbuf == NULL) {
2083 SCTP_STAT_INCR(sctps_nomem);
2087 * Now no matter what, we need a control, get one if we don't have
2088 * one (we may have gotten it above when we found the message was
2091 if (control == NULL) {
2092 sctp_alloc_a_readq(stcb, control);
2093 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2098 if (control == NULL) {
2099 SCTP_STAT_INCR(sctps_nomem);
2102 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2105 control->data = dmbuf;
2106 control->tail_mbuf = NULL;
2107 for (mm = control->data; mm; mm = mm->m_next) {
2108 control->length += SCTP_BUF_LEN(mm);
2109 if (SCTP_BUF_NEXT(mm) == NULL) {
2110 control->tail_mbuf = mm;
2113 control->end_added = 1;
2114 control->last_frag_seen = 1;
2115 control->first_frag_seen = 1;
2116 control->fsn_included = fsn;
2117 control->top_fsn = fsn;
2119 created_control = 1;
2121 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2122 chk_flags, ordered, mid, control);
2123 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2124 TAILQ_EMPTY(&asoc->resetHead) &&
2126 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2127 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2128 /* Candidate for express delivery */
2130 * Its not fragmented, No PD-API is up, Nothing in the
2131 * delivery queue, Its un-ordered OR ordered and the next to
2132 * deliver AND nothing else is stuck on the stream queue,
2133 * And there is room for it in the socket buffer. Lets just
2134 * stuff it up the buffer....
2136 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2137 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2138 asoc->highest_tsn_inside_nr_map = tsn;
2140 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2143 sctp_add_to_readq(stcb->sctp_ep, stcb,
2144 control, &stcb->sctp_socket->so_rcv,
2145 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2147 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2148 /* for ordered, bump what we delivered */
2149 asoc->strmin[sid].last_mid_delivered++;
2151 SCTP_STAT_INCR(sctps_recvexpress);
2152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2153 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2154 SCTP_STR_LOG_FROM_EXPRS_DEL);
2157 goto finish_express_del;
2160 /* Now will we need a chunk too? */
2161 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2162 sctp_alloc_a_chunk(stcb, chk);
2164 /* No memory so we drop the chunk */
2165 SCTP_STAT_INCR(sctps_nomem);
2166 if (last_chunk == 0) {
2167 /* we copied it, free the copy */
2168 sctp_m_freem(dmbuf);
2172 chk->rec.data.tsn = tsn;
2173 chk->no_fr_allowed = 0;
2174 chk->rec.data.fsn = fsn;
2175 chk->rec.data.mid = mid;
2176 chk->rec.data.sid = sid;
2177 chk->rec.data.ppid = ppid;
2178 chk->rec.data.context = stcb->asoc.context;
2179 chk->rec.data.doing_fast_retransmit = 0;
2180 chk->rec.data.rcv_flags = chk_flags;
2182 chk->send_size = the_len;
2184 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2187 atomic_add_int(&net->ref_count, 1);
2190 /* Set the appropriate TSN mark */
2191 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2192 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2193 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2194 asoc->highest_tsn_inside_nr_map = tsn;
2197 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2198 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2199 asoc->highest_tsn_inside_map = tsn;
2202 /* Now is it complete (i.e. not fragmented)? */
2203 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2205 * Special check for when streams are resetting. We could be
2206 * more smart about this and check the actual stream to see
2207 * if it is not being reset.. that way we would not create a
2208 * HOLB when amongst streams being reset and those not being
2212 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2213 SCTP_TSN_GT(tsn, liste->tsn)) {
2215 * yep its past where we need to reset... go ahead
2218 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2220 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2222 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2223 unsigned char inserted = 0;
2225 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2226 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2231 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2236 if (inserted == 0) {
2238 * must be put at end, use prevP
2239 * (all setup from loop) to setup
2242 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2245 goto finish_express_del;
2247 if (chk_flags & SCTP_DATA_UNORDERED) {
2248 /* queue directly into socket buffer */
2249 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2251 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2252 sctp_add_to_readq(stcb->sctp_ep, stcb,
2254 &stcb->sctp_socket->so_rcv, 1,
2255 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2258 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2260 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2268 goto finish_express_del;
2270 /* If we reach here its a reassembly */
2271 need_reasm_check = 1;
2272 SCTPDBG(SCTP_DEBUG_XXX,
2273 "Queue data to stream for reasm control: %p MID: %u\n",
2275 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2278 * the assoc is now gone and chk was put onto the reasm
2279 * queue, which has all been freed.
2287 /* Here we tidy up things */
2288 if (tsn == (asoc->cumulative_tsn + 1)) {
2289 /* Update cum-ack */
2290 asoc->cumulative_tsn = tsn;
2296 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2298 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2300 SCTP_STAT_INCR(sctps_recvdata);
2301 /* Set it present please */
2302 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2303 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2306 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2307 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2309 if (need_reasm_check) {
2310 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2311 need_reasm_check = 0;
2313 /* check the special flag for stream resets */
2314 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2315 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2317 * we have finished working through the backlogged TSN's now
2318 * time to reset streams. 1: call reset function. 2: free
2319 * pending_reply space 3: distribute any chunks in
2320 * pending_reply_queue.
2322 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2323 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2324 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2325 SCTP_FREE(liste, SCTP_M_STRESET);
2326 /* sa_ignore FREED_MEMORY */
2327 liste = TAILQ_FIRST(&asoc->resetHead);
2328 if (TAILQ_EMPTY(&asoc->resetHead)) {
2329 /* All can be removed */
2330 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2331 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2332 strm = &asoc->strmin[control->sinfo_stream];
2333 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2337 if (need_reasm_check) {
2338 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2339 need_reasm_check = 0;
2343 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2344 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2348 * if control->sinfo_tsn is <= liste->tsn we
2349 * can process it which is the NOT of
2350 * control->sinfo_tsn > liste->tsn
2352 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2353 strm = &asoc->strmin[control->sinfo_stream];
2354 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2358 if (need_reasm_check) {
2359 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2360 need_reasm_check = 0;
/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits in the byte
 * v counted from the least significant bit (the "trailing ones" count):
 * tab[v] == 0 whenever bit 0 is clear, tab[0x03] == 2, tab[0xff] == 8.
 * sctp_slide_mapping_arrays() uses it to advance the cumulative-TSN
 * point past the contiguously-received TSNs within a partially filled
 * mapping-array byte.
 */
2368 static const int8_t sctp_map_lookup_tab[256] = {
2369 0, 1, 0, 2, 0, 1, 0, 3,
2370 0, 1, 0, 2, 0, 1, 0, 4,
2371 0, 1, 0, 2, 0, 1, 0, 3,
2372 0, 1, 0, 2, 0, 1, 0, 5,
2373 0, 1, 0, 2, 0, 1, 0, 3,
2374 0, 1, 0, 2, 0, 1, 0, 4,
2375 0, 1, 0, 2, 0, 1, 0, 3,
2376 0, 1, 0, 2, 0, 1, 0, 6,
2377 0, 1, 0, 2, 0, 1, 0, 3,
2378 0, 1, 0, 2, 0, 1, 0, 4,
2379 0, 1, 0, 2, 0, 1, 0, 3,
2380 0, 1, 0, 2, 0, 1, 0, 5,
2381 0, 1, 0, 2, 0, 1, 0, 3,
2382 0, 1, 0, 2, 0, 1, 0, 4,
2383 0, 1, 0, 2, 0, 1, 0, 3,
2384 0, 1, 0, 2, 0, 1, 0, 7,
2385 0, 1, 0, 2, 0, 1, 0, 3,
2386 0, 1, 0, 2, 0, 1, 0, 4,
2387 0, 1, 0, 2, 0, 1, 0, 3,
2388 0, 1, 0, 2, 0, 1, 0, 5,
2389 0, 1, 0, 2, 0, 1, 0, 3,
2390 0, 1, 0, 2, 0, 1, 0, 4,
2391 0, 1, 0, 2, 0, 1, 0, 3,
2392 0, 1, 0, 2, 0, 1, 0, 6,
2393 0, 1, 0, 2, 0, 1, 0, 3,
2394 0, 1, 0, 2, 0, 1, 0, 4,
2395 0, 1, 0, 2, 0, 1, 0, 3,
2396 0, 1, 0, 2, 0, 1, 0, 5,
2397 0, 1, 0, 2, 0, 1, 0, 3,
2398 0, 1, 0, 2, 0, 1, 0, 4,
2399 0, 1, 0, 2, 0, 1, 0, 3,
2400 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Recompute the cumulative TSN from the OR of mapping_array and
 * nr_mapping_array, then try to reclaim mapping-array space: either
 * clear both arrays outright (everything up to the highest TSN is in
 * sequence) or copy them down by whole bytes, advancing
 * mapping_array_base_tsn.  NOTE(review): several original lines are
 * missing from this view (loop-exit condition, some braces); comments
 * below describe only what the visible lines establish.
 */
2405 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2408 * Now we also need to check the mapping array in a couple of ways.
2409 * 1) Did we move the cum-ack point?
2411 * When you first glance at this you might think that all entries
2412 * that make up the position of the cum-ack would be in the
2413 * nr-mapping array only.. i.e. things up to the cum-ack are always
2414 * deliverable. Thats true with one exception, when its a fragmented
2415 * message we may not deliver the data until some threshold (or all
2416 * of it) is in place. So we must OR the nr_mapping_array and
2417 * mapping_array to get a true picture of the cum-ack.
2419 struct sctp_association *asoc;
2422 int slide_from, slide_end, lgap, distance;
2423 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for SCTP_MAP_LOGGING below. */
2427 old_cumack = asoc->cumulative_tsn;
2428 old_base = asoc->mapping_array_base_tsn;
2429 old_highest = asoc->highest_tsn_inside_map;
2431 * We could probably improve this a small bit by calculating the
2432 * offset of the current cum-ack as the starting point.
/*
 * Scan bytes of the combined map; 'at' accumulates the count of
 * in-sequence TSNs (8 per fully-set byte, plus the trailing-ones count
 * of the first byte that has a gap — see sctp_map_lookup_tab).
 */
2435 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2436 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2440 /* there is a 0 bit */
2441 at += sctp_map_lookup_tab[val];
2445 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: cum-ack must not pass both highest-TSN trackers. */
2447 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2448 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2450 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2451 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2453 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2454 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2455 sctp_print_mapping_array(asoc);
2456 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2457 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2459 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2460 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2463 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2464 highest_tsn = asoc->highest_tsn_inside_nr_map;
2466 highest_tsn = asoc->highest_tsn_inside_map;
2468 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2469 /* The complete array was completed by a single FR */
2470 /* highest becomes the cum-ack */
2476 /* clear the array */
2477 clr = ((at + 7) >> 3);
2478 if (clr > asoc->mapping_array_size) {
2479 clr = asoc->mapping_array_size;
2481 memset(asoc->mapping_array, 0, clr);
2482 memset(asoc->nr_mapping_array, 0, clr);
/* Debug pass: both arrays must now be all-zero. */
2484 for (i = 0; i < asoc->mapping_array_size; i++) {
2485 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2486 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2487 sctp_print_mapping_array(asoc);
2491 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2492 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2493 } else if (at >= 8) {
2494 /* we can slide the mapping array down */
2495 /* slide_from holds where we hit the first NON 0xff byte */
2498 * now calculate the ceiling of the move using our highest
2501 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2502 slide_end = (lgap >> 3);
2503 if (slide_end < slide_from) {
2504 sctp_print_mapping_array(asoc);
2506 panic("impossible slide");
2508 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2509 lgap, slide_end, slide_from, at);
2513 if (slide_end > asoc->mapping_array_size) {
2515 panic("would overrun buffer");
2517 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2518 asoc->mapping_array_size, slide_end);
2519 slide_end = asoc->mapping_array_size;
2522 distance = (slide_end - slide_from) + 1;
2523 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2524 sctp_log_map(old_base, old_cumack, old_highest,
2525 SCTP_MAP_PREPARE_SLIDE);
2526 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2527 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2529 if (distance + slide_from > asoc->mapping_array_size ||
2532 * Here we do NOT slide forward the array so that
2533 * hopefully when more data comes in to fill it up
2534 * we will be able to slide it forward. Really I
2535 * don't think this should happen :-0
2537 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2538 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2539 (uint32_t)asoc->mapping_array_size,
2540 SCTP_MAP_SLIDE_NONE);
/* Copy the live window down to the front, then zero the tail. */
2545 for (ii = 0; ii < distance; ii++) {
2546 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2547 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2550 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2551 asoc->mapping_array[ii] = 0;
2552 asoc->nr_mapping_array[ii] = 0;
/*
 * If a highest-TSN tracker sat exactly one below the old base,
 * keep it consistent with the new base (slide_from bytes == 8 *
 * slide_from TSNs).
 */
2554 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2555 asoc->highest_tsn_inside_map += (slide_from << 3);
2557 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2558 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2560 asoc->mapping_array_base_tsn += (slide_from << 3);
2561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2562 sctp_log_map(asoc->mapping_array_base_tsn,
2563 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2564 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to emit a SACK immediately or arm/keep the delayed-ack
 * timer, after first sliding the mapping arrays.  In SHUTDOWN-SENT
 * state the SACK timer is stopped and a SHUTDOWN plus an immediate SACK
 * are sent instead.  'was_a_gap' is the caller's pre-processing gap
 * state; it is compared with the current gap state to force a SACK when
 * a gap was just closed.  NOTE(review): some original lines are missing
 * from this view.
 */
2571 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2573 struct sctp_association *asoc;
2574 uint32_t highest_tsn;
2577 sctp_slide_mapping_arrays(stcb);
/* highest_tsn = max of the renege-able and non-renege-able trackers. */
2579 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2580 highest_tsn = asoc->highest_tsn_inside_nr_map;
2582 highest_tsn = asoc->highest_tsn_inside_map;
2584 /* Is there a gap now? */
2585 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2588 * Now we need to see if we need to queue a sack or just start the
2589 * timer (if allowed).
2591 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2593 * Ok special case, in SHUTDOWN-SENT case. here we maker
2594 * sure SACK timer is off and instead send a SHUTDOWN and a
2597 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2598 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2599 stcb->sctp_ep, stcb, NULL,
2600 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2602 sctp_send_shutdown(stcb,
2603 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2605 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2609 * CMT DAC algorithm: increase number of packets received
2612 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any of these conditions forces a SACK now instead of delaying. */
2614 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2616 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2618 (stcb->asoc.numduptsns) || /* we have dup's */
2619 (is_a_gap) || /* is still a gap */
2620 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2621 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
2623 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2624 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2625 (stcb->asoc.send_sack == 0) &&
2626 (stcb->asoc.numduptsns == 0) &&
2627 (stcb->asoc.delayed_ack) &&
2628 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2631 * CMT DAC algorithm: With CMT, delay acks
2632 * even in the face of reordering.
2633 * Therefore, if acks that do not have to be
2634 * sent because of the above reasons, will
2635 * be delayed. That is, acks that would have
2636 * been sent due to gap reports will be
2637 * delayed with DAC. Start the delayed ack
2640 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2641 stcb->sctp_ep, stcb, NULL);
2644 * Ok we must build a SACK since the timer
2645 * is pending, we got our first packet OR
2646 * there are gaps or duplicates.
2648 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2649 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2650 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise just make sure the delayed-ack timer is running. */
2653 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2654 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2655 stcb->sctp_ep, stcb, NULL);
/*
 * Walk every chunk in the DATA region of a received packet, dispatching
 * DATA/I-DATA chunks to sctp_process_a_data_chunk() and aborting the
 * association on protocol violations (DATA received when I-DATA was
 * negotiated and vice versa, or chunks shorter than their fixed
 * header).  Non-data chunk types encountered here are either aborted
 * on, skipped, or reported per the "unrecognized chunk" bit rules
 * (0x40: report, 0x80: skip vs. stop).  On the way out it updates
 * liveness bookkeeping and lets sctp_sack_check() decide on acking.
 * '*high_tsn' is updated (by the per-chunk handler) to the highest TSN
 * seen.  NOTE(review): a number of original lines are missing from this
 * view (several closing braces and else-branches).
 */
2662 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2663 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2664 struct sctp_nets *net, uint32_t *high_tsn)
2666 struct sctp_chunkhdr *ch, chunk_buf;
2667 struct sctp_association *asoc;
2668 int num_chunks = 0; /* number of control chunks processed */
2670 int break_flag, last_chunk;
2671 int abort_flag = 0, was_a_gap;
2673 uint32_t highest_tsn;
2674 uint16_t chk_length;
2677 sctp_set_rwnd(stcb, &stcb->asoc);
2680 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap existed before this packet was processed. */
2682 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2683 highest_tsn = asoc->highest_tsn_inside_nr_map;
2685 highest_tsn = asoc->highest_tsn_inside_map;
2687 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2689 * setup where we got the last DATA packet from for any SACK that
2690 * may need to go out. Don't bump the net. This is done ONLY when a
2691 * chunk is assigned.
2693 asoc->last_data_chunk_from = net;
2696 * Now before we proceed we must figure out if this is a wasted
2697 * cluster... i.e. it is a small packet sent in and yet the driver
2698 * underneath allocated a full cluster for it. If so we must copy it
2699 * to a smaller mbuf and free up the cluster mbuf. This will help
2700 * with cluster starvation.
2702 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2703 /* we only handle mbufs that are singletons.. not chains */
2704 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2706 /* ok lets see if we can copy the data up */
2709 /* get the pointers and copy */
2710 to = mtod(m, caddr_t *);
2711 from = mtod((*mm), caddr_t *);
2712 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2713 /* copy the length and free up the old */
2714 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2716 /* success, back copy */
2719 /* We are in trouble in the mbuf world .. yikes */
2723 /* get pointer to the first chunk header */
2724 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2725 sizeof(struct sctp_chunkhdr),
2726 (uint8_t *)&chunk_buf);
2731 * process all DATA chunks...
2733 *high_tsn = asoc->cumulative_tsn;
2735 asoc->data_pkts_seen++;
2736 while (stop_proc == 0) {
2737 /* validate chunk length */
2738 chk_length = ntohs(ch->chunk_length);
2739 if (length - *offset < chk_length) {
2740 /* all done, mutulated chunk */
/* Chunk-type vs. negotiated-extension cross checks: each mismatch aborts. */
2744 if ((asoc->idata_supported == 1) &&
2745 (ch->chunk_type == SCTP_DATA)) {
2746 struct mbuf *op_err;
2747 char msg[SCTP_DIAG_INFO_LEN];
2749 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2750 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2751 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2752 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2755 if ((asoc->idata_supported == 0) &&
2756 (ch->chunk_type == SCTP_IDATA)) {
2757 struct mbuf *op_err;
2758 char msg[SCTP_DIAG_INFO_LEN];
2760 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2761 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2762 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2763 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2766 if ((ch->chunk_type == SCTP_DATA) ||
2767 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum length check uses the fixed header size of the variant. */
2770 if (ch->chunk_type == SCTP_DATA) {
2771 clen = sizeof(struct sctp_data_chunk);
2773 clen = sizeof(struct sctp_idata_chunk);
2775 if (chk_length < clen) {
2777 * Need to send an abort since we had a
2778 * invalid data chunk.
2780 struct mbuf *op_err;
2781 char msg[SCTP_DIAG_INFO_LEN];
2783 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2784 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2786 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2787 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2788 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2791 #ifdef SCTP_AUDITING_ENABLED
2792 sctp_audit_log(0xB1, 0);
/* Flag whether this is the final (32-bit padded) chunk of the packet. */
2794 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2799 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2800 chk_length, net, high_tsn, &abort_flag, &break_flag,
2801 last_chunk, ch->chunk_type)) {
2809 * Set because of out of rwnd space and no
2810 * drop rep space left.
2816 /* not a data chunk in the data region */
2817 switch (ch->chunk_type) {
2818 case SCTP_INITIATION:
2819 case SCTP_INITIATION_ACK:
2820 case SCTP_SELECTIVE_ACK:
2821 case SCTP_NR_SELECTIVE_ACK:
2822 case SCTP_HEARTBEAT_REQUEST:
2823 case SCTP_HEARTBEAT_ACK:
2824 case SCTP_ABORT_ASSOCIATION:
2826 case SCTP_SHUTDOWN_ACK:
2827 case SCTP_OPERATION_ERROR:
2828 case SCTP_COOKIE_ECHO:
2829 case SCTP_COOKIE_ACK:
2832 case SCTP_SHUTDOWN_COMPLETE:
2833 case SCTP_AUTHENTICATION:
2834 case SCTP_ASCONF_ACK:
2835 case SCTP_PACKET_DROPPED:
2836 case SCTP_STREAM_RESET:
2837 case SCTP_FORWARD_CUM_TSN:
2841 * Now, what do we do with KNOWN
2842 * chunks that are NOT in the right
2845 * For now, I do nothing but ignore
2846 * them. We may later want to add
2847 * sysctl stuff to switch out and do
2848 * either an ABORT() or possibly
2851 struct mbuf *op_err;
2852 char msg[SCTP_DIAG_INFO_LEN];
2854 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2856 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2857 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2862 * Unknown chunk type: use bit rules after
2865 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2867 * Need to send an abort since we
2868 * had a invalid chunk.
2870 struct mbuf *op_err;
2871 char msg[SCTP_DIAG_INFO_LEN];
2873 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2874 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2875 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2876 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* 0x40 bit set: report the unrecognized chunk in an ERROR cause. */
2879 if (ch->chunk_type & 0x40) {
2880 /* Add a error report to the queue */
2881 struct mbuf *op_err;
2882 struct sctp_gen_error_cause *cause;
2884 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2885 0, M_NOWAIT, 1, MT_DATA);
2886 if (op_err != NULL) {
2887 cause = mtod(op_err, struct sctp_gen_error_cause *);
2888 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2889 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2890 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2891 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2892 if (SCTP_BUF_NEXT(op_err) != NULL) {
2893 sctp_queue_op_err(stcb, op_err);
2895 sctp_m_freem(op_err);
/* 0x80 bit clear: stop processing the rest of the packet. */
2899 if ((ch->chunk_type & 0x80) == 0) {
2900 /* discard the rest of this packet */
2902 } /* else skip this bad chunk and
2905 } /* switch of chunk type */
2907 *offset += SCTP_SIZE32(chk_length);
2908 if ((*offset >= length) || stop_proc) {
2909 /* no more data left in the mbuf chain */
2913 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2914 sizeof(struct sctp_chunkhdr),
2915 (uint8_t *)&chunk_buf);
2924 * we need to report rwnd overrun drops.
2926 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2930 * Did we get data, if so update the time for auto-close and
2931 * give peer credit for being alive.
2933 SCTP_STAT_INCR(sctps_recvpktwithdata);
2934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2935 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2936 stcb->asoc.overall_error_count,
2938 SCTP_FROM_SCTP_INDATA,
/* Incoming data counts as proof of peer liveness. */
2941 stcb->asoc.overall_error_count = 0;
2942 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2944 /* now service all of the reassm queue if needed */
2945 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2946 /* Assure that we ack right away */
2947 stcb->asoc.send_sack = 1;
2949 /* Start a sack timer or QUEUE a SACK for sending */
2950 sctp_sack_check(stcb, was_a_gap);
/*
 * Process one gap-ack fragment [frag_strt..frag_end] (offsets relative
 * to last_tsn) against the sent_queue.  For each TSN in the range that
 * is found on the queue: update CMT (rtx-)pseudo-cumack trackers and
 * SFR/HTNA state, reduce flight size, feed an RTO sample where
 * applicable, raise *biggest_newly_acked_tsn, and mark the chunk
 * MARKED (regular gap-ack) or NR_ACKED with its data freed
 * (nr_sacking).  '*p_tp1' carries the queue cursor across calls so
 * consecutive in-order fragments resume where the last one stopped;
 * 'circled' allows one wrap back to the queue head for out-of-order
 * fragments.  The return value ("wake_him") is only used for nr-sack
 * (see trailing comment).  NOTE(review): several original lines are
 * missing from this view.
 */
2955 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2956 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2958 uint32_t *biggest_newly_acked_tsn,
2959 uint32_t *this_sack_lowest_newack,
2962 struct sctp_tmit_chunk *tp1;
2963 unsigned int theTSN;
2964 int j, wake_him = 0, circled = 0;
2966 /* Recover the tp1 we last saw */
2969 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2971 for (j = frag_strt; j <= frag_end; j++) {
2972 theTSN = j + last_tsn;
2974 if (tp1->rec.data.doing_fast_retransmit)
2978 * CMT: CUCv2 algorithm. For each TSN being
2979 * processed from the sent queue, track the
2980 * next expected pseudo-cumack, or
2981 * rtx_pseudo_cumack, if required. Separate
2982 * cumack trackers for first transmissions,
2983 * and retransmissions.
2985 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2986 (tp1->whoTo->find_pseudo_cumack == 1) &&
2987 (tp1->snd_count == 1)) {
2988 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2989 tp1->whoTo->find_pseudo_cumack = 0;
2991 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2992 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2993 (tp1->snd_count > 1)) {
2994 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2995 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2997 if (tp1->rec.data.tsn == theTSN) {
2998 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3000 * must be held until
3003 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3005 * If it is less than RESEND, it is
3006 * now no-longer in flight.
3007 * Higher values may already be set
3008 * via previous Gap Ack Blocks...
3009 * i.e. ACKED or RESEND.
3011 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3012 *biggest_newly_acked_tsn)) {
3013 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3016 * CMT: SFR algo (and HTNA) - set
3017 * saw_newack to 1 for dest being
3018 * newly acked. update
3019 * this_sack_highest_newack if
3022 if (tp1->rec.data.chunk_was_revoked == 0)
3023 tp1->whoTo->saw_newack = 1;
3025 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3026 tp1->whoTo->this_sack_highest_newack)) {
3027 tp1->whoTo->this_sack_highest_newack =
3031 * CMT DAC algo: also update
3032 * this_sack_lowest_newack
3034 if (*this_sack_lowest_newack == 0) {
3035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3036 sctp_log_sack(*this_sack_lowest_newack,
3041 SCTP_LOG_TSN_ACKED);
3043 *this_sack_lowest_newack = tp1->rec.data.tsn;
3046 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3047 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3048 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3049 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3050 * Separate pseudo_cumack trackers for first transmissions and
3053 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3054 if (tp1->rec.data.chunk_was_revoked == 0) {
3055 tp1->whoTo->new_pseudo_cumack = 1;
3057 tp1->whoTo->find_pseudo_cumack = 1;
3059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3060 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3062 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3063 if (tp1->rec.data.chunk_was_revoked == 0) {
3064 tp1->whoTo->new_pseudo_cumack = 1;
3066 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3069 sctp_log_sack(*biggest_newly_acked_tsn,
3074 SCTP_LOG_TSN_ACKED);
3076 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3077 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3078 tp1->whoTo->flight_size,
3080 (uint32_t)(uintptr_t)tp1->whoTo,
/* Newly gap-acked: remove from flight accounting. */
3083 sctp_flight_size_decrease(tp1);
3084 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3085 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3088 sctp_total_flight_decrease(stcb, tp1);
3090 tp1->whoTo->net_ack += tp1->send_size;
3091 if (tp1->snd_count < 2) {
3093 * True non-retransmitted chunk
3095 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample only from a never-retransmitted chunk (Karn's rule). */
3102 sctp_calculate_rto(stcb,
3105 &tp1->sent_rcv_time,
3106 SCTP_RTT_FROM_DATA)) {
3109 if (tp1->whoTo->rto_needed == 0) {
3110 tp1->whoTo->rto_needed = 1;
3117 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3118 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3119 stcb->asoc.this_sack_highest_gap)) {
3120 stcb->asoc.this_sack_highest_gap =
3123 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3124 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3125 #ifdef SCTP_AUDITING_ENABLED
3126 sctp_audit_log(0xB2,
3127 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3132 * All chunks NOT UNSENT fall through here and are marked
3133 * (leave PR-SCTP ones that are to skip alone though)
3135 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3136 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3137 tp1->sent = SCTP_DATAGRAM_MARKED;
3139 if (tp1->rec.data.chunk_was_revoked) {
3140 /* deflate the cwnd */
3141 tp1->whoTo->cwnd -= tp1->book_size;
3142 tp1->rec.data.chunk_was_revoked = 0;
3144 /* NR Sack code here */
3146 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3147 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3148 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3151 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* Stream drained while a reset is pending: arm the deferred reset. */
3154 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3155 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3156 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3157 stcb->asoc.trigger_reset = 1;
3159 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* NR-acked data will never be retransmitted: release its mbufs now. */
3165 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3166 sctp_m_freem(tp1->data);
3173 } /* if (tp1->tsn == theTSN) */
3174 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3177 tp1 = TAILQ_NEXT(tp1, sctp_next);
3178 if ((tp1 == NULL) && (circled == 0)) {
3180 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3182 } /* end while (tp1) */
3185 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3187 /* In case the fragments were not in order we must reset */
3188 } /* end for (j = fragStart */
3190 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate the num_seg regular plus num_nr_seg NR gap-ack blocks of a
 * SACK/NR-SACK chunk.  Malformed blocks (start > end) are skipped;
 * out-of-order blocks (start <= previous end) restart the sent_queue
 * cursor from the head.  Each valid block is handed to
 * sctp_process_segment_range(), which advances the shared 'tp1'
 * cursor.  Returns nonzero ("chunk_freed") when any NR-acked chunk's
 * data was released — also returned early if a block cannot be read
 * from the mbuf chain.  NOTE(review): a few original lines are missing
 * from this view.
 */
3195 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3196 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3197 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3198 int num_seg, int num_nr_seg, int *rto_ok)
3200 struct sctp_gap_ack_block *frag, block;
3201 struct sctp_tmit_chunk *tp1;
3206 uint16_t frag_strt, frag_end, prev_frag_end;
3208 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3212 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3215 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap-ack block out of the SACK chunk. */
3217 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3218 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3219 *offset += sizeof(block);
3221 return (chunk_freed);
3223 frag_strt = ntohs(frag->start);
3224 frag_end = ntohs(frag->end);
3226 if (frag_strt > frag_end) {
3227 /* This gap report is malformed, skip it. */
3230 if (frag_strt <= prev_frag_end) {
3231 /* This gap report is not in order, so restart. */
3232 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3234 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3235 *biggest_tsn_acked = last_tsn + frag_end;
3242 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3243 non_revocable, &num_frs, biggest_newly_acked_tsn,
3244 this_sack_lowest_newack, rto_ok)) {
3247 prev_frag_end = frag_end;
3249 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3251 sctp_log_fr(*biggest_tsn_acked,
3252 *biggest_newly_acked_tsn,
3253 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3255 return (chunk_freed);
/*
 * Scan the sent_queue above 'cumack' for chunks whose earlier gap-ack
 * was withdrawn ("revoked") by this SACK: a chunk still ACKED but at
 * or below 'biggest_tsn_acked' was not re-reported, so it goes back to
 * SENT with flight size re-inflated and cwnd compensated (the cwnd
 * bump offsets the artificial flight-size increase).  Chunks MARKED by
 * this SACK's gap blocks are promoted to ACKED.  NOTE(review): the
 * loop-termination lines around TSNs beyond biggest_tsn_acked are
 * missing from this view.
 */
3259 sctp_check_for_revoked(struct sctp_tcb *stcb,
3260 struct sctp_association *asoc, uint32_t cumack,
3261 uint32_t biggest_tsn_acked)
3263 struct sctp_tmit_chunk *tp1;
3265 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3266 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3268 * ok this guy is either ACK or MARKED. If it is
3269 * ACKED it has been previously acked but not this
3270 * time i.e. revoked. If it is MARKED it was ACK'ed
3273 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3276 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3277 /* it has been revoked */
3278 tp1->sent = SCTP_DATAGRAM_SENT;
3279 tp1->rec.data.chunk_was_revoked = 1;
3281 * We must add this stuff back in to assure
3282 * timers and such get started.
3284 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3285 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3286 tp1->whoTo->flight_size,
3288 (uint32_t)(uintptr_t)tp1->whoTo,
3291 sctp_flight_size_increase(tp1);
3292 sctp_total_flight_increase(stcb, tp1);
3294 * We inflate the cwnd to compensate for our
3295 * artificial inflation of the flight_size.
3297 tp1->whoTo->cwnd += tp1->book_size;
3298 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3299 sctp_log_sack(asoc->last_acked_seq,
3304 SCTP_LOG_TSN_REVOKED);
3306 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3307 /* it has been re-acked in this SACK */
3308 tp1->sent = SCTP_DATAGRAM_ACKED;
3311 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3318 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3319 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3321 struct sctp_tmit_chunk *tp1;
3322 int strike_flag = 0;
3324 int tot_retrans = 0;
3325 uint32_t sending_seq;
3326 struct sctp_nets *net;
3327 int num_dests_sacked = 0;
3330 * select the sending_seq, this is either the next thing ready to be
3331 * sent but not transmitted, OR, the next seq we assign.
3333 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3335 sending_seq = asoc->sending_seq;
3337 sending_seq = tp1->rec.data.tsn;
3340 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3341 if ((asoc->sctp_cmt_on_off > 0) &&
3342 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3343 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3344 if (net->saw_newack)
3348 if (stcb->asoc.prsctp_supported) {
3349 (void)SCTP_GETTIME_TIMEVAL(&now);
3351 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3353 if (tp1->no_fr_allowed) {
3354 /* this one had a timeout or something */
3357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3358 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3359 sctp_log_fr(biggest_tsn_newly_acked,
3362 SCTP_FR_LOG_CHECK_STRIKE);
3364 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3365 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3369 if (stcb->asoc.prsctp_supported) {
3370 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3371 /* Is it expired? */
3372 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3373 /* Yes so drop it */
3374 if (tp1->data != NULL) {
3375 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3376 SCTP_SO_NOT_LOCKED);
3383 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3384 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3385 /* we are beyond the tsn in the sack */
3388 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3389 /* either a RESEND, ACKED, or MARKED */
3391 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3392 /* Continue strikin FWD-TSN chunks */
3393 tp1->rec.data.fwd_tsn_cnt++;
3398 * CMT : SFR algo (covers part of DAC and HTNA as well)
3400 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3402 * No new acks were receieved for data sent to this
3403 * dest. Therefore, according to the SFR algo for
3404 * CMT, no data sent to this dest can be marked for
3405 * FR using this SACK.
3408 } else if (tp1->whoTo &&
3409 SCTP_TSN_GT(tp1->rec.data.tsn,
3410 tp1->whoTo->this_sack_highest_newack) &&
3411 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3413 * CMT: New acks were receieved for data sent to
3414 * this dest. But no new acks were seen for data
3415 * sent after tp1. Therefore, according to the SFR
3416 * algo for CMT, tp1 cannot be marked for FR using
3417 * this SACK. This step covers part of the DAC algo
3418 * and the HTNA algo as well.
3423 * Here we check to see if we were have already done a FR
3424 * and if so we see if the biggest TSN we saw in the sack is
3425 * smaller than the recovery point. If so we don't strike
3426 * the tsn... otherwise we CAN strike the TSN.
3429 * @@@ JRI: Check for CMT if (accum_moved &&
3430 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3433 if (accum_moved && asoc->fast_retran_loss_recovery) {
3435 * Strike the TSN if in fast-recovery and cum-ack
3438 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3439 sctp_log_fr(biggest_tsn_newly_acked,
3442 SCTP_FR_LOG_STRIKE_CHUNK);
3444 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3447 if ((asoc->sctp_cmt_on_off > 0) &&
3448 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3450 * CMT DAC algorithm: If SACK flag is set to
3451 * 0, then lowest_newack test will not pass
3452 * because it would have been set to the
3453 * cumack earlier. If not already to be
3454 * rtx'd, If not a mixed sack and if tp1 is
3455 * not between two sacked TSNs, then mark by
3456 * one more. NOTE that we are marking by one
3457 * additional time since the SACK DAC flag
3458 * indicates that two packets have been
3459 * received after this missing TSN.
3461 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3462 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3463 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3464 sctp_log_fr(16 + num_dests_sacked,
3467 SCTP_FR_LOG_STRIKE_CHUNK);
3472 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3473 (asoc->sctp_cmt_on_off == 0)) {
3475 * For those that have done a FR we must take
3476 * special consideration if we strike. I.e the
3477 * biggest_newly_acked must be higher than the
3478 * sending_seq at the time we did the FR.
3481 #ifdef SCTP_FR_TO_ALTERNATE
3483 * If FR's go to new networks, then we must only do
3484 * this for singly homed asoc's. However if the FR's
3485 * go to the same network (Armando's work) then its
3486 * ok to FR multiple times.
3494 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3495 tp1->rec.data.fast_retran_tsn)) {
3497 * Strike the TSN, since this ack is
3498 * beyond where things were when we
3501 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3502 sctp_log_fr(biggest_tsn_newly_acked,
3505 SCTP_FR_LOG_STRIKE_CHUNK);
3507 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3511 if ((asoc->sctp_cmt_on_off > 0) &&
3512 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3514 * CMT DAC algorithm: If
3515 * SACK flag is set to 0,
3516 * then lowest_newack test
3517 * will not pass because it
3518 * would have been set to
3519 * the cumack earlier. If
3520 * not already to be rtx'd,
3521 * If not a mixed sack and
3522 * if tp1 is not between two
3523 * sacked TSNs, then mark by
3524 * one more. NOTE that we
3525 * are marking by one
3526 * additional time since the
3527 * SACK DAC flag indicates
3528 * that two packets have
3529 * been received after this
3532 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3533 (num_dests_sacked == 1) &&
3534 SCTP_TSN_GT(this_sack_lowest_newack,
3535 tp1->rec.data.tsn)) {
3536 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3537 sctp_log_fr(32 + num_dests_sacked,
3540 SCTP_FR_LOG_STRIKE_CHUNK);
3542 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3550 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3553 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3554 biggest_tsn_newly_acked)) {
3556 * We don't strike these: This is the HTNA
3557 * algorithm i.e. we don't strike If our TSN is
3558 * larger than the Highest TSN Newly Acked.
3562 /* Strike the TSN */
3563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3564 sctp_log_fr(biggest_tsn_newly_acked,
3567 SCTP_FR_LOG_STRIKE_CHUNK);
3569 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3572 if ((asoc->sctp_cmt_on_off > 0) &&
3573 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3575 * CMT DAC algorithm: If SACK flag is set to
3576 * 0, then lowest_newack test will not pass
3577 * because it would have been set to the
3578 * cumack earlier. If not already to be
3579 * rtx'd, If not a mixed sack and if tp1 is
3580 * not between two sacked TSNs, then mark by
3581 * one more. NOTE that we are marking by one
3582 * additional time since the SACK DAC flag
3583 * indicates that two packets have been
3584 * received after this missing TSN.
3586 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3587 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3588 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3589 sctp_log_fr(48 + num_dests_sacked,
3592 SCTP_FR_LOG_STRIKE_CHUNK);
3598 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3599 struct sctp_nets *alt;
3601 /* fix counts and things */
3602 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3603 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3604 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3606 (uint32_t)(uintptr_t)tp1->whoTo,
3610 tp1->whoTo->net_ack++;
3611 sctp_flight_size_decrease(tp1);
3612 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3613 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3618 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3619 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3620 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3622 /* add back to the rwnd */
3623 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3625 /* remove from the total flight */
3626 sctp_total_flight_decrease(stcb, tp1);
3628 if ((stcb->asoc.prsctp_supported) &&
3629 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3631 * Has it been retransmitted tv_sec times? -
3632 * we store the retran count there.
3634 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3635 /* Yes, so drop it */
3636 if (tp1->data != NULL) {
3637 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3638 SCTP_SO_NOT_LOCKED);
3640 /* Make sure to flag we had a FR */
3641 if (tp1->whoTo != NULL) {
3642 tp1->whoTo->net_ack++;
3648 * SCTP_PRINTF("OK, we are now ready to FR this
3651 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3652 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3656 /* This is a subsequent FR */
3657 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3659 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3660 if (asoc->sctp_cmt_on_off > 0) {
3662 * CMT: Using RTX_SSTHRESH policy for CMT.
3663 * If CMT is being used, then pick dest with
3664 * largest ssthresh for any retransmission.
3666 tp1->no_fr_allowed = 1;
3668 /* sa_ignore NO_NULL_CHK */
3669 if (asoc->sctp_cmt_pf > 0) {
3671 * JRS 5/18/07 - If CMT PF is on,
3672 * use the PF version of
3675 alt = sctp_find_alternate_net(stcb, alt, 2);
3678 * JRS 5/18/07 - If only CMT is on,
3679 * use the CMT version of
3682 /* sa_ignore NO_NULL_CHK */
3683 alt = sctp_find_alternate_net(stcb, alt, 1);
3689 * CUCv2: If a different dest is picked for
3690 * the retransmission, then new
3691 * (rtx-)pseudo_cumack needs to be tracked
3692 * for orig dest. Let CUCv2 track new (rtx-)
3693 * pseudo-cumack always.
3696 tp1->whoTo->find_pseudo_cumack = 1;
3697 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3699 } else { /* CMT is OFF */
3700 #ifdef SCTP_FR_TO_ALTERNATE
3701 /* Can we find an alternate? */
3702 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3705 * default behavior is to NOT retransmit
3706 * FR's to an alternate. Armando Caro's
3707 * paper details why.
3713 tp1->rec.data.doing_fast_retransmit = 1;
3715 /* mark the sending seq for possible subsequent FR's */
3717 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3718 * (uint32_t)tpi->rec.data.tsn);
3720 if (TAILQ_EMPTY(&asoc->send_queue)) {
3722 * If the queue of send is empty then its
3723 * the next sequence number that will be
3724 * assigned so we subtract one from this to
3725 * get the one we last sent.
3727 tp1->rec.data.fast_retran_tsn = sending_seq;
3730 * If there are chunks on the send queue
3731 * (unsent data that has made it from the
3732 * stream queues but not out the door, we
3733 * take the first one (which will have the
3734 * lowest TSN) and subtract one to get the
3737 struct sctp_tmit_chunk *ttt;
3739 ttt = TAILQ_FIRST(&asoc->send_queue);
3740 tp1->rec.data.fast_retran_tsn =
3746 * this guy had a RTO calculation pending on
3749 if ((tp1->whoTo != NULL) &&
3750 (tp1->whoTo->rto_needed == 0)) {
3751 tp1->whoTo->rto_needed = 1;
3755 if (alt != tp1->whoTo) {
3756 /* yes, there is an alternate. */
3757 sctp_free_remote_addr(tp1->whoTo);
3758 /* sa_ignore FREED_MEMORY */
3760 atomic_add_int(&alt->ref_count, 1);
3766 struct sctp_tmit_chunk *
3767 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3768 struct sctp_association *asoc)
3770 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3774 if (asoc->prsctp_supported == 0) {
3777 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3778 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3779 tp1->sent != SCTP_DATAGRAM_RESEND &&
3780 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3781 /* no chance to advance, out of here */
3784 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3785 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3786 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3787 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3788 asoc->advanced_peer_ack_point,
3789 tp1->rec.data.tsn, 0, 0);
3792 if (!PR_SCTP_ENABLED(tp1->flags)) {
3794 * We can't fwd-tsn past any that are reliable aka
3795 * retransmitted until the asoc fails.
3800 (void)SCTP_GETTIME_TIMEVAL(&now);
3804 * now we got a chunk which is marked for another
3805 * retransmission to a PR-stream but has run out its chances
3806 * already maybe OR has been marked to skip now. Can we skip
3807 * it if its a resend?
3809 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3810 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3812 * Now is this one marked for resend and its time is
3815 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3816 /* Yes so drop it */
3818 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3819 1, SCTP_SO_NOT_LOCKED);
3823 * No, we are done when hit one for resend
3824 * whos time as not expired.
3830 * Ok now if this chunk is marked to drop it we can clean up
3831 * the chunk, advance our peer ack point and we can check
3834 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3835 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3836 /* advance PeerAckPoint goes forward */
3837 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3838 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3840 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3841 /* No update but we do save the chk */
3846 * If it is still in RESEND we can advance no
3856 sctp_fs_audit(struct sctp_association *asoc)
3858 struct sctp_tmit_chunk *chk;
3859 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3862 int entry_flight, entry_cnt;
3867 entry_flight = asoc->total_flight;
3868 entry_cnt = asoc->total_flight_count;
3870 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3873 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3874 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3875 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3880 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3882 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3884 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3891 if ((inflight > 0) || (inbetween > 0)) {
3893 panic("Flight size-express incorrect? \n");
3895 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3896 entry_flight, entry_cnt);
3898 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3899 inflight, inbetween, resend, above, acked);
3908 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3909 struct sctp_association *asoc,
3910 struct sctp_tmit_chunk *tp1)
3912 tp1->window_probe = 0;
3913 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3914 /* TSN's skipped we do NOT move back. */
3915 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3916 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3918 (uint32_t)(uintptr_t)tp1->whoTo,
3922 /* First setup this by shrinking flight */
3923 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3924 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3927 sctp_flight_size_decrease(tp1);
3928 sctp_total_flight_decrease(stcb, tp1);
3929 /* Now mark for resend */
3930 tp1->sent = SCTP_DATAGRAM_RESEND;
3931 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3934 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3935 tp1->whoTo->flight_size,
3937 (uint32_t)(uintptr_t)tp1->whoTo,
3943 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3944 uint32_t rwnd, int *abort_now, int ecne_seen)
3946 struct sctp_nets *net;
3947 struct sctp_association *asoc;
3948 struct sctp_tmit_chunk *tp1, *tp2;
3950 int win_probe_recovery = 0;
3951 int win_probe_recovered = 0;
3952 int j, done_once = 0;
3956 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3957 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3958 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3960 SCTP_TCB_LOCK_ASSERT(stcb);
3961 #ifdef SCTP_ASOCLOG_OF_TSNS
3962 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3963 stcb->asoc.cumack_log_at++;
3964 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3965 stcb->asoc.cumack_log_at = 0;
3969 old_rwnd = asoc->peers_rwnd;
3970 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3973 } else if (asoc->last_acked_seq == cumack) {
3974 /* Window update sack */
3975 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3976 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3977 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3978 /* SWS sender side engages */
3979 asoc->peers_rwnd = 0;
3981 if (asoc->peers_rwnd > old_rwnd) {
3987 /* First setup for CC stuff */
3988 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3989 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3990 /* Drag along the window_tsn for cwr's */
3991 net->cwr_window_tsn = cumack;
3993 net->prev_cwnd = net->cwnd;
3998 * CMT: Reset CUC and Fast recovery algo variables before
4001 net->new_pseudo_cumack = 0;
4002 net->will_exit_fast_recovery = 0;
4003 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4004 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4007 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4008 tp1 = TAILQ_LAST(&asoc->sent_queue,
4009 sctpchunk_listhead);
4010 send_s = tp1->rec.data.tsn + 1;
4012 send_s = asoc->sending_seq;
4014 if (SCTP_TSN_GE(cumack, send_s)) {
4015 struct mbuf *op_err;
4016 char msg[SCTP_DIAG_INFO_LEN];
4020 SCTP_SNPRINTF(msg, sizeof(msg),
4021 "Cum ack %8.8x greater or equal than TSN %8.8x",
4023 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4024 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4025 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4028 asoc->this_sack_highest_gap = cumack;
4029 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4030 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4031 stcb->asoc.overall_error_count,
4033 SCTP_FROM_SCTP_INDATA,
4036 stcb->asoc.overall_error_count = 0;
4037 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4038 /* process the new consecutive TSN first */
4039 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4040 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4041 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4042 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4044 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4046 * If it is less than ACKED, it is
4047 * now no-longer in flight. Higher
4048 * values may occur during marking
4050 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4051 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4052 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4053 tp1->whoTo->flight_size,
4055 (uint32_t)(uintptr_t)tp1->whoTo,
4058 sctp_flight_size_decrease(tp1);
4059 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4060 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4063 /* sa_ignore NO_NULL_CHK */
4064 sctp_total_flight_decrease(stcb, tp1);
4066 tp1->whoTo->net_ack += tp1->send_size;
4067 if (tp1->snd_count < 2) {
4069 * True non-retransmitted
4072 tp1->whoTo->net_ack2 +=
4075 /* update RTO too? */
4078 sctp_calculate_rto(stcb,
4081 &tp1->sent_rcv_time,
4082 SCTP_RTT_FROM_DATA)) {
4085 if (tp1->whoTo->rto_needed == 0) {
4086 tp1->whoTo->rto_needed = 1;
4092 * CMT: CUCv2 algorithm. From the
4093 * cumack'd TSNs, for each TSN being
4094 * acked for the first time, set the
4095 * following variables for the
4096 * corresp destination.
4097 * new_pseudo_cumack will trigger a
4099 * find_(rtx_)pseudo_cumack will
4100 * trigger search for the next
4101 * expected (rtx-)pseudo-cumack.
4103 tp1->whoTo->new_pseudo_cumack = 1;
4104 tp1->whoTo->find_pseudo_cumack = 1;
4105 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4107 /* sa_ignore NO_NULL_CHK */
4108 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4111 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4112 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4114 if (tp1->rec.data.chunk_was_revoked) {
4115 /* deflate the cwnd */
4116 tp1->whoTo->cwnd -= tp1->book_size;
4117 tp1->rec.data.chunk_was_revoked = 0;
4119 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4120 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4121 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4124 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4128 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4129 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4130 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4131 asoc->trigger_reset = 1;
4133 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4135 /* sa_ignore NO_NULL_CHK */
4136 sctp_free_bufspace(stcb, asoc, tp1, 1);
4137 sctp_m_freem(tp1->data);
4140 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4141 sctp_log_sack(asoc->last_acked_seq,
4146 SCTP_LOG_FREE_SENT);
4148 asoc->sent_queue_cnt--;
4149 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4156 /* sa_ignore NO_NULL_CHK */
4157 if (stcb->sctp_socket) {
4158 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4160 /* sa_ignore NO_NULL_CHK */
4161 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4163 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4165 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4166 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4170 /* JRS - Use the congestion control given in the CC module */
4171 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4172 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4173 if (net->net_ack2 > 0) {
4175 * Karn's rule applies to clearing error
4176 * count, this is optional.
4178 net->error_count = 0;
4179 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4180 /* addr came good */
4181 net->dest_state |= SCTP_ADDR_REACHABLE;
4182 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4183 0, (void *)net, SCTP_SO_NOT_LOCKED);
4185 if (net == stcb->asoc.primary_destination) {
4186 if (stcb->asoc.alternate) {
4188 * release the alternate,
4191 sctp_free_remote_addr(stcb->asoc.alternate);
4192 stcb->asoc.alternate = NULL;
4195 if (net->dest_state & SCTP_ADDR_PF) {
4196 net->dest_state &= ~SCTP_ADDR_PF;
4197 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4198 stcb->sctp_ep, stcb, net,
4199 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4200 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4201 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4202 /* Done with this net */
4205 /* restore any doubled timers */
4206 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4207 if (net->RTO < stcb->asoc.minrto) {
4208 net->RTO = stcb->asoc.minrto;
4210 if (net->RTO > stcb->asoc.maxrto) {
4211 net->RTO = stcb->asoc.maxrto;
4215 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4217 asoc->last_acked_seq = cumack;
4219 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4220 /* nothing left in-flight */
4221 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4222 net->flight_size = 0;
4223 net->partial_bytes_acked = 0;
4225 asoc->total_flight = 0;
4226 asoc->total_flight_count = 0;
4230 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4231 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4232 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4233 /* SWS sender side engages */
4234 asoc->peers_rwnd = 0;
4236 if (asoc->peers_rwnd > old_rwnd) {
4237 win_probe_recovery = 1;
4239 /* Now assure a timer where data is queued at */
4242 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4243 if (win_probe_recovery && (net->window_probe)) {
4244 win_probe_recovered = 1;
4246 * Find first chunk that was used with window probe
4247 * and clear the sent
4249 /* sa_ignore FREED_MEMORY */
4250 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4251 if (tp1->window_probe) {
4252 /* move back to data send queue */
4253 sctp_window_probe_recovery(stcb, asoc, tp1);
4258 if (net->flight_size) {
4260 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4261 if (net->window_probe) {
4262 net->window_probe = 0;
4265 if (net->window_probe) {
4267 * In window probes we must assure a timer
4268 * is still running there
4270 net->window_probe = 0;
4271 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4272 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4274 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4275 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4277 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4282 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4283 (asoc->sent_queue_retran_cnt == 0) &&
4284 (win_probe_recovered == 0) &&
4287 * huh, this should not happen unless all packets are
4288 * PR-SCTP and marked to skip of course.
4290 if (sctp_fs_audit(asoc)) {
4291 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4292 net->flight_size = 0;
4294 asoc->total_flight = 0;
4295 asoc->total_flight_count = 0;
4296 asoc->sent_queue_retran_cnt = 0;
4297 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4298 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4299 sctp_flight_size_increase(tp1);
4300 sctp_total_flight_increase(stcb, tp1);
4301 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4302 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4309 /**********************************/
4310 /* Now what about shutdown issues */
4311 /**********************************/
4312 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4313 /* nothing left on sendqueue.. consider done */
4315 if ((asoc->stream_queue_cnt == 1) &&
4316 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4317 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4318 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4319 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4321 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4322 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4323 (asoc->stream_queue_cnt == 1) &&
4324 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4325 struct mbuf *op_err;
4329 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4330 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4331 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4334 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4335 (asoc->stream_queue_cnt == 0)) {
4336 struct sctp_nets *netp;
4338 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4339 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4340 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4342 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4343 sctp_stop_timers_for_shutdown(stcb);
4344 if (asoc->alternate) {
4345 netp = asoc->alternate;
4347 netp = asoc->primary_destination;
4349 sctp_send_shutdown(stcb, netp);
4350 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4351 stcb->sctp_ep, stcb, netp);
4352 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4353 stcb->sctp_ep, stcb, NULL);
4354 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4355 (asoc->stream_queue_cnt == 0)) {
4356 struct sctp_nets *netp;
4358 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4359 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4360 sctp_stop_timers_for_shutdown(stcb);
4361 if (asoc->alternate) {
4362 netp = asoc->alternate;
4364 netp = asoc->primary_destination;
4366 sctp_send_shutdown_ack(stcb, netp);
4367 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4368 stcb->sctp_ep, stcb, netp);
4371 /*********************************************/
4372 /* Here we perform PR-SCTP procedures */
4374 /*********************************************/
4375 /* C1. update advancedPeerAckPoint */
4376 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4377 asoc->advanced_peer_ack_point = cumack;
4379 /* PR-Sctp issues need to be addressed too */
4380 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4381 struct sctp_tmit_chunk *lchk;
4382 uint32_t old_adv_peer_ack_point;
4384 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4385 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4386 /* C3. See if we need to send a Fwd-TSN */
4387 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4389 * ISSUE with ECN, see FWD-TSN processing.
4391 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4392 send_forward_tsn(stcb, asoc);
4394 /* try to FR fwd-tsn's that get lost too */
4395 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4396 send_forward_tsn(stcb, asoc);
4400 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4401 if (lchk->whoTo != NULL) {
4406 /* Assure a timer is up */
4407 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4408 stcb->sctp_ep, stcb, lchk->whoTo);
4411 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4412 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4414 stcb->asoc.peers_rwnd,
4415 stcb->asoc.total_flight,
4416 stcb->asoc.total_output_queue_size);
4421 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4422 struct sctp_tcb *stcb,
4423 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4424 int *abort_now, uint8_t flags,
4425 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4427 struct sctp_association *asoc;
4428 struct sctp_tmit_chunk *tp1, *tp2;
4429 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4430 uint16_t wake_him = 0;
4431 uint32_t send_s = 0;
4433 int accum_moved = 0;
4434 int will_exit_fast_recovery = 0;
4435 uint32_t a_rwnd, old_rwnd;
4436 int win_probe_recovery = 0;
4437 int win_probe_recovered = 0;
4438 struct sctp_nets *net = NULL;
4441 uint8_t reneged_all = 0;
4442 uint8_t cmt_dac_flag;
4445 * we take any chance we can to service our queues since we cannot
4446 * get awoken when the socket is read from :<
4449 * Now perform the actual SACK handling: 1) Verify that it is not an
4450 * old sack, if so discard. 2) If there is nothing left in the send
4451 * queue (cum-ack is equal to last acked) then you have a duplicate
4452 * too, update any rwnd change and verify no timers are running.
4453 * then return. 3) Process any new consequtive data i.e. cum-ack
4454 * moved process these first and note that it moved. 4) Process any
4455 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4456 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4457 * sync up flightsizes and things, stop all timers and also check
4458 * for shutdown_pending state. If so then go ahead and send off the
4459 * shutdown. If in shutdown recv, send off the shutdown-ack and
4460 * start that timer, Ret. 9) Strike any non-acked things and do FR
4461 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4462 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4463 * if in shutdown_recv state.
4465 SCTP_TCB_LOCK_ASSERT(stcb);
4467 this_sack_lowest_newack = 0;
4468 SCTP_STAT_INCR(sctps_slowpath_sack);
4470 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4471 #ifdef SCTP_ASOCLOG_OF_TSNS
4472 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4473 stcb->asoc.cumack_log_at++;
4474 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4475 stcb->asoc.cumack_log_at = 0;
4480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4481 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4482 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4485 old_rwnd = stcb->asoc.peers_rwnd;
4486 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4487 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4488 stcb->asoc.overall_error_count,
4490 SCTP_FROM_SCTP_INDATA,
4493 stcb->asoc.overall_error_count = 0;
4495 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4496 sctp_log_sack(asoc->last_acked_seq,
4503 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4505 uint32_t *dupdata, dblock;
4507 for (i = 0; i < num_dup; i++) {
4508 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4509 sizeof(uint32_t), (uint8_t *)&dblock);
4510 if (dupdata == NULL) {
4513 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4517 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4518 tp1 = TAILQ_LAST(&asoc->sent_queue,
4519 sctpchunk_listhead);
4520 send_s = tp1->rec.data.tsn + 1;
4523 send_s = asoc->sending_seq;
4525 if (SCTP_TSN_GE(cum_ack, send_s)) {
4526 struct mbuf *op_err;
4527 char msg[SCTP_DIAG_INFO_LEN];
4530 * no way, we have not even sent this TSN out yet. Peer is
4531 * hopelessly messed up with us.
4533 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4536 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4537 tp1->rec.data.tsn, (void *)tp1);
4542 SCTP_SNPRINTF(msg, sizeof(msg),
4543 "Cum ack %8.8x greater or equal than TSN %8.8x",
4545 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4546 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4547 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4550 /**********************/
4551 /* 1) check the range */
4552 /**********************/
4553 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4554 /* acking something behind */
4558 /* update the Rwnd of the peer */
4559 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4560 TAILQ_EMPTY(&asoc->send_queue) &&
4561 (asoc->stream_queue_cnt == 0)) {
4562 /* nothing left on send/sent and strmq */
4563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4564 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4565 asoc->peers_rwnd, 0, 0, a_rwnd);
4567 asoc->peers_rwnd = a_rwnd;
4568 if (asoc->sent_queue_retran_cnt) {
4569 asoc->sent_queue_retran_cnt = 0;
4571 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4572 /* SWS sender side engages */
4573 asoc->peers_rwnd = 0;
4575 /* stop any timers */
4576 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4577 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4578 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4579 net->partial_bytes_acked = 0;
4580 net->flight_size = 0;
4582 asoc->total_flight = 0;
4583 asoc->total_flight_count = 0;
4587 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4588 * things. The total byte count acked is tracked in netAckSz AND
4589 * netAck2 is used to track the total bytes acked that are un-
4590 * amibguious and were never retransmitted. We track these on a per
4591 * destination address basis.
4593 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4594 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4595 /* Drag along the window_tsn for cwr's */
4596 net->cwr_window_tsn = cum_ack;
4598 net->prev_cwnd = net->cwnd;
4603 * CMT: Reset CUC and Fast recovery algo variables before
4606 net->new_pseudo_cumack = 0;
4607 net->will_exit_fast_recovery = 0;
4608 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4609 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4613 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4614 * to be greater than the cumack. Also reset saw_newack to 0
4617 net->saw_newack = 0;
4618 net->this_sack_highest_newack = last_tsn;
4620 /* process the new consecutive TSN first */
4621 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4622 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4623 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4625 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4627 * If it is less than ACKED, it is
4628 * now no-longer in flight. Higher
4629 * values may occur during marking
4631 if ((tp1->whoTo->dest_state &
4632 SCTP_ADDR_UNCONFIRMED) &&
4633 (tp1->snd_count < 2)) {
4635 * If there was no retran
4636 * and the address is
4637 * un-confirmed and we sent
4639 * sacked.. its confirmed,
4642 tp1->whoTo->dest_state &=
4643 ~SCTP_ADDR_UNCONFIRMED;
4645 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4646 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4647 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4648 tp1->whoTo->flight_size,
4650 (uint32_t)(uintptr_t)tp1->whoTo,
4653 sctp_flight_size_decrease(tp1);
4654 sctp_total_flight_decrease(stcb, tp1);
4655 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4656 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4660 tp1->whoTo->net_ack += tp1->send_size;
4662 /* CMT SFR and DAC algos */
4663 this_sack_lowest_newack = tp1->rec.data.tsn;
4664 tp1->whoTo->saw_newack = 1;
4666 if (tp1->snd_count < 2) {
4668 * True non-retransmitted
4671 tp1->whoTo->net_ack2 +=
4674 /* update RTO too? */
4677 sctp_calculate_rto(stcb,
4680 &tp1->sent_rcv_time,
4681 SCTP_RTT_FROM_DATA)) {
4684 if (tp1->whoTo->rto_needed == 0) {
4685 tp1->whoTo->rto_needed = 1;
4691 * CMT: CUCv2 algorithm. From the
4692 * cumack'd TSNs, for each TSN being
4693 * acked for the first time, set the
4694 * following variables for the
4695 * corresp destination.
4696 * new_pseudo_cumack will trigger a
4698 * find_(rtx_)pseudo_cumack will
4699 * trigger search for the next
4700 * expected (rtx-)pseudo-cumack.
4702 tp1->whoTo->new_pseudo_cumack = 1;
4703 tp1->whoTo->find_pseudo_cumack = 1;
4704 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4706 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4707 sctp_log_sack(asoc->last_acked_seq,
4712 SCTP_LOG_TSN_ACKED);
4714 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4715 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4718 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4719 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4720 #ifdef SCTP_AUDITING_ENABLED
4721 sctp_audit_log(0xB3,
4722 (asoc->sent_queue_retran_cnt & 0x000000ff));
4725 if (tp1->rec.data.chunk_was_revoked) {
4726 /* deflate the cwnd */
4727 tp1->whoTo->cwnd -= tp1->book_size;
4728 tp1->rec.data.chunk_was_revoked = 0;
4730 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4731 tp1->sent = SCTP_DATAGRAM_ACKED;
4738 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4739 /* always set this up to cum-ack */
4740 asoc->this_sack_highest_gap = last_tsn;
4742 if ((num_seg > 0) || (num_nr_seg > 0)) {
4745 * thisSackHighestGap will increase while handling NEW
4746 * segments this_sack_highest_newack will increase while
4747 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4748 * used for CMT DAC algo. saw_newack will also change.
4750 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4751 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4752 num_seg, num_nr_seg, &rto_ok)) {
4756 * validate the biggest_tsn_acked in the gap acks if strict
4757 * adherence is wanted.
4759 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4761 * peer is either confused or we are under attack.
4764 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4765 biggest_tsn_acked, send_s);
4769 /*******************************************/
4770 /* cancel ALL T3-send timer if accum moved */
4771 /*******************************************/
4772 if (asoc->sctp_cmt_on_off > 0) {
4773 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4774 if (net->new_pseudo_cumack)
4775 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4777 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4782 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4783 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4784 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4788 /********************************************/
4789 /* drop the acked chunks from the sentqueue */
4790 /********************************************/
4791 asoc->last_acked_seq = cum_ack;
4793 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4794 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4797 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4798 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4799 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4802 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4806 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4807 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4808 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4809 asoc->trigger_reset = 1;
4811 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4812 if (PR_SCTP_ENABLED(tp1->flags)) {
4813 if (asoc->pr_sctp_cnt != 0)
4814 asoc->pr_sctp_cnt--;
4816 asoc->sent_queue_cnt--;
4818 /* sa_ignore NO_NULL_CHK */
4819 sctp_free_bufspace(stcb, asoc, tp1, 1);
4820 sctp_m_freem(tp1->data);
4822 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4823 asoc->sent_queue_cnt_removeable--;
4826 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4827 sctp_log_sack(asoc->last_acked_seq,
4832 SCTP_LOG_FREE_SENT);
4834 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4837 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4839 panic("Warning flight size is positive and should be 0");
4841 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4842 asoc->total_flight);
4844 asoc->total_flight = 0;
4847 /* sa_ignore NO_NULL_CHK */
4848 if ((wake_him) && (stcb->sctp_socket)) {
4849 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4850 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4851 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4853 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4855 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4856 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4860 if (asoc->fast_retran_loss_recovery && accum_moved) {
4861 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4862 /* Setup so we will exit RFC2582 fast recovery */
4863 will_exit_fast_recovery = 1;
4867 * Check for revoked fragments:
4869 * if Previous sack - Had no frags then we can't have any revoked if
4870 * Previous sack - Had frag's then - If we now have frags aka
4871 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4872 * some of them. else - The peer revoked all ACKED fragments, since
4873 * we had some before and now we have NONE.
4877 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4878 asoc->saw_sack_with_frags = 1;
4879 } else if (asoc->saw_sack_with_frags) {
4880 int cnt_revoked = 0;
4882 /* Peer revoked all dg's marked or acked */
4883 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4884 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4885 tp1->sent = SCTP_DATAGRAM_SENT;
4886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4887 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4888 tp1->whoTo->flight_size,
4890 (uint32_t)(uintptr_t)tp1->whoTo,
4893 sctp_flight_size_increase(tp1);
4894 sctp_total_flight_increase(stcb, tp1);
4895 tp1->rec.data.chunk_was_revoked = 1;
4897 * To ensure that this increase in
4898 * flightsize, which is artificial, does not
4899 * throttle the sender, we also increase the
4900 * cwnd artificially.
4902 tp1->whoTo->cwnd += tp1->book_size;
4909 asoc->saw_sack_with_frags = 0;
4912 asoc->saw_sack_with_nr_frags = 1;
4914 asoc->saw_sack_with_nr_frags = 0;
4916 /* JRS - Use the congestion control given in the CC module */
4917 if (ecne_seen == 0) {
4918 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4919 if (net->net_ack2 > 0) {
4921 * Karn's rule applies to clearing error
4922 * count, this is optional.
4924 net->error_count = 0;
4925 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4926 /* addr came good */
4927 net->dest_state |= SCTP_ADDR_REACHABLE;
4928 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4929 0, (void *)net, SCTP_SO_NOT_LOCKED);
4932 if (net == stcb->asoc.primary_destination) {
4933 if (stcb->asoc.alternate) {
4935 * release the alternate,
4938 sctp_free_remote_addr(stcb->asoc.alternate);
4939 stcb->asoc.alternate = NULL;
4943 if (net->dest_state & SCTP_ADDR_PF) {
4944 net->dest_state &= ~SCTP_ADDR_PF;
4945 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4946 stcb->sctp_ep, stcb, net,
4947 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4948 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4949 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4950 /* Done with this net */
4953 /* restore any doubled timers */
4954 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4955 if (net->RTO < stcb->asoc.minrto) {
4956 net->RTO = stcb->asoc.minrto;
4958 if (net->RTO > stcb->asoc.maxrto) {
4959 net->RTO = stcb->asoc.maxrto;
4963 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4966 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4967 /* nothing left in-flight */
4968 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4969 /* stop all timers */
4970 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4972 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4973 net->flight_size = 0;
4974 net->partial_bytes_acked = 0;
4976 asoc->total_flight = 0;
4977 asoc->total_flight_count = 0;
4980 /**********************************/
4981 /* Now what about shutdown issues */
4982 /**********************************/
4983 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4984 /* nothing left on sendqueue.. consider done */
4985 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4986 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4987 asoc->peers_rwnd, 0, 0, a_rwnd);
4989 asoc->peers_rwnd = a_rwnd;
4990 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4991 /* SWS sender side engages */
4992 asoc->peers_rwnd = 0;
4995 if ((asoc->stream_queue_cnt == 1) &&
4996 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4997 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4998 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4999 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5001 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5002 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5003 (asoc->stream_queue_cnt == 1) &&
5004 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5005 struct mbuf *op_err;
5009 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5010 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5011 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5014 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5015 (asoc->stream_queue_cnt == 0)) {
5016 struct sctp_nets *netp;
5018 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5019 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5020 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5022 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5023 sctp_stop_timers_for_shutdown(stcb);
5024 if (asoc->alternate) {
5025 netp = asoc->alternate;
5027 netp = asoc->primary_destination;
5029 sctp_send_shutdown(stcb, netp);
5030 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5031 stcb->sctp_ep, stcb, netp);
5032 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5033 stcb->sctp_ep, stcb, NULL);
5035 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5036 (asoc->stream_queue_cnt == 0)) {
5037 struct sctp_nets *netp;
5039 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5040 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5041 sctp_stop_timers_for_shutdown(stcb);
5042 if (asoc->alternate) {
5043 netp = asoc->alternate;
5045 netp = asoc->primary_destination;
5047 sctp_send_shutdown_ack(stcb, netp);
5048 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5049 stcb->sctp_ep, stcb, netp);
5054 * Now here we are going to recycle net_ack for a different use...
5057 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5062 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5063 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5064 * automatically ensure that.
5066 if ((asoc->sctp_cmt_on_off > 0) &&
5067 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5068 (cmt_dac_flag == 0)) {
5069 this_sack_lowest_newack = cum_ack;
5071 if ((num_seg > 0) || (num_nr_seg > 0)) {
5072 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5073 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5075 /* JRS - Use the congestion control given in the CC module */
5076 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5078 /* Now are we exiting loss recovery ? */
5079 if (will_exit_fast_recovery) {
5080 /* Ok, we must exit fast recovery */
5081 asoc->fast_retran_loss_recovery = 0;
5083 if ((asoc->sat_t3_loss_recovery) &&
5084 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5085 /* end satellite t3 loss recovery */
5086 asoc->sat_t3_loss_recovery = 0;
5091 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5092 if (net->will_exit_fast_recovery) {
5093 /* Ok, we must exit fast recovery */
5094 net->fast_retran_loss_recovery = 0;
5098 /* Adjust and set the new rwnd value */
5099 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5100 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5101 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5103 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5104 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5105 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5106 /* SWS sender side engages */
5107 asoc->peers_rwnd = 0;
5109 if (asoc->peers_rwnd > old_rwnd) {
5110 win_probe_recovery = 1;
5114 * Now we must setup so we have a timer up for anyone with
5120 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5121 if (win_probe_recovery && (net->window_probe)) {
5122 win_probe_recovered = 1;
5124 * Find first chunk that was used with
5125 * window probe and clear the event. Put
5126 * it back into the send queue as if has
5129 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5130 if (tp1->window_probe) {
5131 sctp_window_probe_recovery(stcb, asoc, tp1);
5136 if (net->flight_size) {
5138 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5139 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5140 stcb->sctp_ep, stcb, net);
5142 if (net->window_probe) {
5143 net->window_probe = 0;
5146 if (net->window_probe) {
5148 * In window probes we must assure a timer
5149 * is still running there
5151 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5152 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5153 stcb->sctp_ep, stcb, net);
5156 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5157 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5159 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5164 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5165 (asoc->sent_queue_retran_cnt == 0) &&
5166 (win_probe_recovered == 0) &&
5169 * huh, this should not happen unless all packets are
5170 * PR-SCTP and marked to skip of course.
5172 if (sctp_fs_audit(asoc)) {
5173 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5174 net->flight_size = 0;
5176 asoc->total_flight = 0;
5177 asoc->total_flight_count = 0;
5178 asoc->sent_queue_retran_cnt = 0;
5179 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5180 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5181 sctp_flight_size_increase(tp1);
5182 sctp_total_flight_increase(stcb, tp1);
5183 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5184 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5191 /*********************************************/
5192 /* Here we perform PR-SCTP procedures */
5194 /*********************************************/
5195 /* C1. update advancedPeerAckPoint */
5196 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5197 asoc->advanced_peer_ack_point = cum_ack;
5199 /* C2. try to further move advancedPeerAckPoint ahead */
5200 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5201 struct sctp_tmit_chunk *lchk;
5202 uint32_t old_adv_peer_ack_point;
5204 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5205 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5206 /* C3. See if we need to send a Fwd-TSN */
5207 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5209 * ISSUE with ECN, see FWD-TSN processing.
5211 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5212 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5213 0xee, cum_ack, asoc->advanced_peer_ack_point,
5214 old_adv_peer_ack_point);
5216 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5217 send_forward_tsn(stcb, asoc);
5219 /* try to FR fwd-tsn's that get lost too */
5220 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5221 send_forward_tsn(stcb, asoc);
5225 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5226 if (lchk->whoTo != NULL) {
5231 /* Assure a timer is up */
5232 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5233 stcb->sctp_ep, stcb, lchk->whoTo);
5236 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5237 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5239 stcb->asoc.peers_rwnd,
5240 stcb->asoc.total_flight,
5241 stcb->asoc.total_output_queue_size);
5246 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5249 uint32_t cum_ack, a_rwnd;
5251 cum_ack = ntohl(cp->cumulative_tsn_ack);
5252 /* Arrange so a_rwnd does NOT change */
5253 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5255 /* Now call the express sack handling */
5256 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * sctp_kick_prsctp_reorder_queue:
 *
 * After a FORWARD-TSN has advanced strmin->last_mid_delivered, walk this
 * inbound stream's ordered queue and push every message that has now
 * become deliverable up to the socket read queue.
 *
 * Pass 1 delivers (or schedules a reassembly check for) everything at or
 * before the new delivery point; pass 2 then continues with strictly
 * consecutive messages that follow it, advancing last_mid_delivered.
 *
 * NOTE(review): this runs with the INP read lock held -- the delivery
 * calls below pass SCTP_READ_LOCK_HELD; confirm against the caller
 * (sctp_handle_forward_tsn).
 */
5260 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5261 struct sctp_stream_in *strmin)
5263 struct sctp_queued_to_read *control, *ncontrol;
5264 struct sctp_association *asoc;
/* Set when a partially reassembled message is made "next to deliver". */
5266 int need_reasm_check = 0;
/* Highest MID (message id, or SSN without I-DATA) already covered. */
5269 mid = strmin->last_mid_delivered;
5271 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with MID <= the new delivery point. */
5274 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5275 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5276 /* this is deliverable now */
5277 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/*
 * Complete (unfragmented) message: unlink it from whichever
 * stream queue it sits on before handing it upward.
 */
5278 if (control->on_strm_q) {
5279 if (control->on_strm_q == SCTP_ON_ORDERED) {
5280 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5281 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5282 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5285 panic("strmin: %p ctl: %p unknown %d",
5286 strmin, control, control->on_strm_q);
5289 control->on_strm_q = 0;
5291 /* subtract pending on streams */
/* Guard the byte count against underflow (panics when checked). */
5292 if (asoc->size_on_all_streams >= control->length) {
5293 asoc->size_on_all_streams -= control->length;
5296 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5298 asoc->size_on_all_streams = 0;
5301 sctp_ucount_decr(asoc->cnt_on_all_streams);
5302 /* deliver it to at least the delivery-q */
5303 if (stcb->sctp_socket) {
5304 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5305 sctp_add_to_readq(stcb->sctp_ep, stcb,
5307 &stcb->sctp_socket->so_rcv,
5308 1, SCTP_READ_LOCK_HELD,
5309 SCTP_SO_NOT_LOCKED);
5312 /* Its a fragmented message */
5313 if (control->first_frag_seen) {
5315 * Make it so this is next to
5316 * deliver, we restore later
/* Rewind the delivery point so the reassembly check below
 * considers this partial message first. */
5318 strmin->last_mid_delivered = control->mid - 1;
5319 need_reasm_check = 1;
5324 /* no more delivery now. */
/*
 * Run the reassembly check with last_mid_delivered temporarily
 * rewound, then restore the delivery point unless reassembly
 * itself moved it further ahead.
 */
5328 if (need_reasm_check) {
5331 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5332 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5333 /* Restore the next to deliver unless we are ahead */
5334 strmin->last_mid_delivered = mid;
5337 /* Left the front Partial one on */
5340 need_reasm_check = 0;
5343 * now we must deliver things in queue the normal way if any are
/* Pass 2: keep delivering strictly consecutive MIDs past the point. */
5346 mid = strmin->last_mid_delivered + 1;
5347 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5348 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5349 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5350 /* this is deliverable now */
5351 if (control->on_strm_q) {
5352 if (control->on_strm_q == SCTP_ON_ORDERED) {
5353 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5354 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5355 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5358 panic("strmin: %p ctl: %p unknown %d",
5359 strmin, control, control->on_strm_q);
5362 control->on_strm_q = 0;
5364 /* subtract pending on streams */
5365 if (asoc->size_on_all_streams >= control->length) {
5366 asoc->size_on_all_streams -= control->length;
5369 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5371 asoc->size_on_all_streams = 0;
5374 sctp_ucount_decr(asoc->cnt_on_all_streams);
5375 /* deliver it to at least the delivery-q */
/* Record delivery and look for the next consecutive MID. */
5376 strmin->last_mid_delivered = control->mid;
5377 if (stcb->sctp_socket) {
5378 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5379 sctp_add_to_readq(stcb->sctp_ep, stcb,
5381 &stcb->sctp_socket->so_rcv, 1,
5382 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5385 mid = strmin->last_mid_delivered + 1;
5387 /* Its a fragmented message */
5388 if (control->first_frag_seen) {
5390 * Make it so this is next to
5393 strmin->last_mid_delivered = control->mid - 1;
5394 need_reasm_check = 1;
5402 if (need_reasm_check) {
5403 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * sctp_flush_reassm_for_str_seq:
 *
 * Tear down the reassembly state of one partially received message
 * ('control') on stream 'strm' because a FORWARD-TSN at 'cumtsn' has
 * skipped past it.  Hanging fragments are freed, the reassembly and
 * per-stream byte/chunk accounting is unwound, and the control block
 * itself is released unless it is already visible on the socket read
 * queue.  'ordered' distinguishes the ordered vs. unordered in-queue.
 */
5410 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5411 struct sctp_association *asoc, struct sctp_stream_in *strm,
5412 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5414 struct sctp_tmit_chunk *chk, *nchk;
5417 * For now large messages held on the stream reasm that are complete
5418 * will be tossed too. We could in theory do more work to spin
5419 * through and stop after dumping one msg aka seeing the start of a
5420 * new msg at the head, and call the delivery function... to see if
5421 * it can be delivered... But for now we just dump everything on the
/*
 * Old-style (non I-DATA) unordered data whose included FSN is still
 * beyond the cum-TSN was not passed by this FORWARD-TSN: leave it.
 */
5424 if (!asoc->idata_supported && !ordered &&
5425 control->first_frag_seen &&
5426 SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5429 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5430 /* Purge hanging chunks */
5431 if (!asoc->idata_supported && !ordered) {
5432 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5436 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* Unwind reassembly-queue byte accounting; guard underflow. */
5437 if (asoc->size_on_reasm_queue >= chk->send_size) {
5438 asoc->size_on_reasm_queue -= chk->send_size;
5441 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5443 asoc->size_on_reasm_queue = 0;
5446 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
/* Release the fragment's mbuf chain and the chunk descriptor. */
5448 sctp_m_freem(chk->data);
5451 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * Some fragments survived the purge: reset the control, rebuild it
 * from the first fragment if one is at the head, then retry delivery.
 */
5453 if (!TAILQ_EMPTY(&control->reasm)) {
5454 /* This has to be old data, unordered */
5455 if (control->data) {
5456 sctp_m_freem(control->data);
5457 control->data = NULL;
5459 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5460 chk = TAILQ_FIRST(&control->reasm);
5461 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5462 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5463 sctp_add_chk_to_control(control, strm, stcb, asoc,
5464 chk, SCTP_READ_LOCK_HELD);
5466 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/*
 * Nothing left to reassemble: unlink the control from whichever
 * stream queue it is on and fix the per-stream accounting.
 */
5469 if (control->on_strm_q == SCTP_ON_ORDERED) {
5470 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5471 if (asoc->size_on_all_streams >= control->length) {
5472 asoc->size_on_all_streams -= control->length;
5475 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5477 asoc->size_on_all_streams = 0;
5480 sctp_ucount_decr(asoc->cnt_on_all_streams);
5481 control->on_strm_q = 0;
5482 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5483 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5484 control->on_strm_q = 0;
5486 } else if (control->on_strm_q) {
5487 panic("strm: %p ctl: %p unknown %d",
5488 strm, control, control->on_strm_q);
5491 control->on_strm_q = 0;
/* Free the control only if it is not also on the read queue. */
5492 if (control->on_read_q == 0) {
5493 sctp_free_remote_addr(control->whoFrom);
5494 if (control->data) {
5495 sctp_m_freem(control->data);
5496 control->data = NULL;
5498 sctp_free_a_readq(stcb, control);
5503 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5504 struct sctp_forward_tsn_chunk *fwd,
5505 int *abort_flag, struct mbuf *m, int offset)
5507 /* The pr-sctp fwd tsn */
5509 * here we will perform all the data receiver side steps for
5510 * processing FwdTSN, as required in by pr-sctp draft:
5512 * Assume we get FwdTSN(x):
5514 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5515 * + others we have 3) examine and update re-ordering queue on
5516 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5517 * report where we are.
5519 struct sctp_association *asoc;
5520 uint32_t new_cum_tsn, gap;
5521 unsigned int i, fwd_sz, m_size;
5523 struct sctp_stream_in *strm;
5524 struct sctp_queued_to_read *control, *sv;
5527 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5528 SCTPDBG(SCTP_DEBUG_INDATA1,
5529 "Bad size too small/big fwd-tsn\n");
5532 m_size = (stcb->asoc.mapping_array_size << 3);
5533 /*************************************************************/
5534 /* 1. Here we update local cumTSN and shift the bitmap array */
5535 /*************************************************************/
5536 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5538 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5539 /* Already got there ... */
5543 * now we know the new TSN is more advanced, let's find the actual
5546 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5547 asoc->cumulative_tsn = new_cum_tsn;
5548 if (gap >= m_size) {
5549 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5550 struct mbuf *op_err;
5551 char msg[SCTP_DIAG_INFO_LEN];
5554 * out of range (of single byte chunks in the rwnd I
5555 * give out). This must be an attacker.
5558 SCTP_SNPRINTF(msg, sizeof(msg),
5559 "New cum ack %8.8x too high, highest TSN %8.8x",
5560 new_cum_tsn, asoc->highest_tsn_inside_map);
5561 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5562 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5563 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5566 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5568 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5569 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5570 asoc->highest_tsn_inside_map = new_cum_tsn;
5572 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5573 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5575 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5576 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5579 SCTP_TCB_LOCK_ASSERT(stcb);
5580 for (i = 0; i <= gap; i++) {
5581 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5582 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5583 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5584 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5585 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5590 /*************************************************************/
5591 /* 2. Clear up re-assembly queue */
5592 /*************************************************************/
5594 /* This is now done as part of clearing up the stream/seq */
5595 if (asoc->idata_supported == 0) {
5598 /* Flush all the un-ordered data based on cum-tsn */
5599 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5600 for (sid = 0; sid < asoc->streamincnt; sid++) {
5601 strm = &asoc->strmin[sid];
5602 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5603 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5606 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5608 /*******************************************************/
5609 /* 3. Update the PR-stream re-ordering queues and fix */
5610 /* delivery issues as needed. */
5611 /*******************************************************/
5612 fwd_sz -= sizeof(*fwd);
5615 unsigned int num_str;
5618 uint16_t ordered, flags;
5619 struct sctp_strseq *stseq, strseqbuf;
5620 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5622 offset += sizeof(*fwd);
5624 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5625 if (asoc->idata_supported) {
5626 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5628 num_str = fwd_sz / sizeof(struct sctp_strseq);
5630 for (i = 0; i < num_str; i++) {
5631 if (asoc->idata_supported) {
5632 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5633 sizeof(struct sctp_strseq_mid),
5634 (uint8_t *)&strseqbuf_m);
5635 offset += sizeof(struct sctp_strseq_mid);
5636 if (stseq_m == NULL) {
5639 sid = ntohs(stseq_m->sid);
5640 mid = ntohl(stseq_m->mid);
5641 flags = ntohs(stseq_m->flags);
5642 if (flags & PR_SCTP_UNORDERED_FLAG) {
5648 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5649 sizeof(struct sctp_strseq),
5650 (uint8_t *)&strseqbuf);
5651 offset += sizeof(struct sctp_strseq);
5652 if (stseq == NULL) {
5655 sid = ntohs(stseq->sid);
5656 mid = (uint32_t)ntohs(stseq->ssn);
5664 * Ok we now look for the stream/seq on the read
5665 * queue where its not all delivered. If we find it
5666 * we transmute the read entry into a PDI_ABORTED.
5668 if (sid >= asoc->streamincnt) {
5669 /* screwed up streams, stop! */
5672 if ((asoc->str_of_pdapi == sid) &&
5673 (asoc->ssn_of_pdapi == mid)) {
5675 * If this is the one we were partially
5676 * delivering now then we no longer are.
5677 * Note this will change with the reassembly
5680 asoc->fragmented_delivery_inprogress = 0;
5682 strm = &asoc->strmin[sid];
5684 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
5685 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5686 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5690 if (asoc->idata_supported) {
5691 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
5692 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5693 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5697 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5698 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5702 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5703 if ((control->sinfo_stream == sid) &&
5704 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5705 str_seq = (sid << 16) | (0x0000ffff & mid);
5706 control->pdapi_aborted = 1;
5707 sv = stcb->asoc.control_pdapi;
5708 control->end_added = 1;
5709 if (control->on_strm_q == SCTP_ON_ORDERED) {
5710 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5711 if (asoc->size_on_all_streams >= control->length) {
5712 asoc->size_on_all_streams -= control->length;
5715 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5717 asoc->size_on_all_streams = 0;
5720 sctp_ucount_decr(asoc->cnt_on_all_streams);
5721 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5722 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5724 } else if (control->on_strm_q) {
5725 panic("strm: %p ctl: %p unknown %d",
5726 strm, control, control->on_strm_q);
5729 control->on_strm_q = 0;
5730 stcb->asoc.control_pdapi = control;
5731 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5733 SCTP_PARTIAL_DELIVERY_ABORTED,
5735 SCTP_SO_NOT_LOCKED);
5736 stcb->asoc.control_pdapi = sv;
5738 } else if ((control->sinfo_stream == sid) &&
5739 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5740 /* We are past our victim SSN */
5744 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5745 /* Update the sequence number */
5746 strm->last_mid_delivered = mid;
5748 /* now kick the stream the new way */
5749 /* sa_ignore NO_NULL_CHK */
5750 sctp_kick_prsctp_reorder_queue(stcb, strm);
5752 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5755 * Now slide thing forward.
5757 sctp_slide_mapping_arrays(stcb);