2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a sack into the chunk queue (if I have data to
58 * send, that is) and will be sending it ... for bundling.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int hold_rlock);
72 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
77 /* Calculate what the rwnd would be */
79 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
84 * This is really set wrong with respect to a 1-2-m socket. Since
85 * the sb_cc is the count that everyone as put up. When we re-write
86 * sctp_soreceive then we will fix this so that ONLY this
87 * associations data is taken into account.
89 if (stcb->sctp_socket == NULL) {
93 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
94 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
95 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
96 ("size_on_all_streams is %u", asoc->size_on_all_streams));
97 if (stcb->asoc.sb_cc == 0 &&
98 asoc->cnt_on_reasm_queue == 0 &&
99 asoc->cnt_on_all_streams == 0) {
100 /* Full rwnd granted */
101 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
104 /* get actual space */
105 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 * take out what has NOT been put on socket queue and we yet hold
110 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
111 asoc->cnt_on_reasm_queue * MSIZE));
112 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
113 asoc->cnt_on_all_streams * MSIZE));
119 /* what is the overhead of all these rwnd's */
120 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 * If the window gets too small due to ctrl-stuff, reduce it to 1,
123 * even it is 0. SWS engaged
125 if (calc < stcb->asoc.my_rwnd_control_len) {
132 * Build out our readq entry based on the incoming packet.
134 struct sctp_queued_to_read *
135 sctp_build_readq_entry(struct sctp_tcb *stcb,
136 struct sctp_nets *net,
137 uint32_t tsn, uint32_t ppid,
138 uint32_t context, uint16_t sid,
139 uint32_t mid, uint8_t flags,
142 struct sctp_queued_to_read *read_queue_e = NULL;
144 sctp_alloc_a_readq(stcb, read_queue_e);
145 if (read_queue_e == NULL) {
148 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
149 read_queue_e->sinfo_stream = sid;
150 read_queue_e->sinfo_flags = (flags << 8);
151 read_queue_e->sinfo_ppid = ppid;
152 read_queue_e->sinfo_context = context;
153 read_queue_e->sinfo_tsn = tsn;
154 read_queue_e->sinfo_cumtsn = tsn;
155 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
156 read_queue_e->mid = mid;
157 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
158 TAILQ_INIT(&read_queue_e->reasm);
159 read_queue_e->whoFrom = net;
160 atomic_add_int(&net->ref_count, 1);
161 read_queue_e->data = dm;
162 read_queue_e->stcb = stcb;
163 read_queue_e->port_from = stcb->rport;
164 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
165 read_queue_e->do_not_ref_stcb = 1;
168 return (read_queue_e);
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
174 struct sctp_extrcvinfo *seinfo;
175 struct sctp_sndrcvinfo *outinfo;
176 struct sctp_rcvinfo *rcvinfo;
177 struct sctp_nxtinfo *nxtinfo;
184 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 /* user does not want any ancillary data */
192 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
195 seinfo = (struct sctp_extrcvinfo *)sinfo;
196 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
199 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
206 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
209 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
215 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
220 SCTP_BUF_LEN(ret) = 0;
222 /* We need a CMSG header followed by the struct */
223 cmh = mtod(ret, struct cmsghdr *);
225 * Make sure that there is no un-initialized padding between the
226 * cmsg header and cmsg data and after the cmsg data.
229 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 cmh->cmsg_level = IPPROTO_SCTP;
231 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 cmh->cmsg_type = SCTP_RCVINFO;
233 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 rcvinfo->rcv_context = sinfo->sinfo_context;
241 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
246 cmh->cmsg_level = IPPROTO_SCTP;
247 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 cmh->cmsg_type = SCTP_NXTINFO;
249 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 nxtinfo->nxt_flags = 0;
252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 nxtinfo->nxt_flags |= SCTP_UNORDERED;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 nxtinfo->nxt_flags |= SCTP_COMPLETE;
261 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
267 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 cmh->cmsg_level = IPPROTO_SCTP;
269 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 cmh->cmsg_type = SCTP_EXTRCV;
273 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 cmh->cmsg_type = SCTP_SNDRCV;
279 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
286 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
288 uint32_t gap, i, cumackp1;
290 int in_r = 0, in_nr = 0;
292 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
295 cumackp1 = asoc->cumulative_tsn + 1;
296 if (SCTP_TSN_GT(cumackp1, tsn)) {
298 * this tsn is behind the cum ack and thus we don't need to
299 * worry about it being moved from one to the other.
303 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
304 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
305 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
306 if ((in_r == 0) && (in_nr == 0)) {
308 panic("Things are really messed up now");
310 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
311 sctp_print_mapping_array(asoc);
315 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
318 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
319 asoc->highest_tsn_inside_nr_map = tsn;
321 if (tsn == asoc->highest_tsn_inside_map) {
322 /* We must back down to see what the new highest is */
323 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
324 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
325 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
326 asoc->highest_tsn_inside_map = i;
332 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
338 sctp_place_control_in_stream(struct sctp_stream_in *strm,
339 struct sctp_association *asoc,
340 struct sctp_queued_to_read *control)
342 struct sctp_queued_to_read *at;
343 struct sctp_readhead *q;
344 uint8_t flags, unordered;
346 flags = (control->sinfo_flags >> 8);
347 unordered = flags & SCTP_DATA_UNORDERED;
349 q = &strm->uno_inqueue;
350 if (asoc->idata_supported == 0) {
351 if (!TAILQ_EMPTY(q)) {
353 * Only one stream can be here in old style
358 TAILQ_INSERT_TAIL(q, control, next_instrm);
359 control->on_strm_q = SCTP_ON_UNORDERED;
365 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
366 control->end_added = 1;
367 control->first_frag_seen = 1;
368 control->last_frag_seen = 1;
370 if (TAILQ_EMPTY(q)) {
372 TAILQ_INSERT_HEAD(q, control, next_instrm);
374 control->on_strm_q = SCTP_ON_UNORDERED;
376 control->on_strm_q = SCTP_ON_ORDERED;
380 TAILQ_FOREACH(at, q, next_instrm) {
381 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
383 * one in queue is bigger than the new one,
384 * insert before this one
386 TAILQ_INSERT_BEFORE(at, control, next_instrm);
388 control->on_strm_q = SCTP_ON_UNORDERED;
390 control->on_strm_q = SCTP_ON_ORDERED;
393 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
395 * Gak, He sent me a duplicate msg id
396 * number?? return -1 to abort.
400 if (TAILQ_NEXT(at, next_instrm) == NULL) {
402 * We are at the end, insert it
405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
406 sctp_log_strm_del(control, at,
407 SCTP_STR_LOG_FROM_INSERT_TL);
409 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
411 control->on_strm_q = SCTP_ON_UNORDERED;
413 control->on_strm_q = SCTP_ON_ORDERED;
424 sctp_abort_in_reasm(struct sctp_tcb *stcb,
425 struct sctp_queued_to_read *control,
426 struct sctp_tmit_chunk *chk,
427 int *abort_flag, int opspot)
429 char msg[SCTP_DIAG_INFO_LEN];
432 if (stcb->asoc.idata_supported) {
433 SCTP_SNPRINTF(msg, sizeof(msg),
434 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
436 control->fsn_included,
439 chk->rec.data.fsn, chk->rec.data.mid);
441 SCTP_SNPRINTF(msg, sizeof(msg),
442 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
444 control->fsn_included,
448 (uint16_t)chk->rec.data.mid);
450 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
451 sctp_m_freem(chk->data);
453 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
454 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
455 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
460 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
463 * The control could not be placed and must be cleaned.
465 struct sctp_tmit_chunk *chk, *nchk;
467 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
468 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
470 sctp_m_freem(chk->data);
472 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
474 sctp_free_remote_addr(control->whoFrom);
476 sctp_m_freem(control->data);
477 control->data = NULL;
479 sctp_free_a_readq(stcb, control);
483 * Queue the chunk either right into the socket buffer if it is the next one
484 * to go OR put it in the correct place in the delivery queue. If we do
485 * append to the so_buf, keep doing so until we are out of order as
486 * long as the control's entered are non-fragmented.
489 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
490 struct sctp_association *asoc,
491 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
494 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
495 * all the data in one stream this could happen quite rapidly. One
496 * could use the TSN to keep track of things, but this scheme breaks
497 * down in the other type of stream usage that could occur. Send a
498 * single msg to stream 0, send 4Billion messages to stream 1, now
499 * send a message to stream 0. You have a situation where the TSN
500 * has wrapped but not in the stream. Is this worth worrying about
501 * or should we just change our queue sort at the bottom to be by
504 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
505 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
506 * assignment this could happen... and I don't see how this would be
507 * a violation. So for now I am undecided an will leave the sort by
508 * SSN alone. Maybe a hybred approach is the answer
511 struct sctp_queued_to_read *at;
515 struct sctp_stream_in *strm;
516 char msg[SCTP_DIAG_INFO_LEN];
518 strm = &asoc->strmin[control->sinfo_stream];
519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
520 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
522 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
523 /* The incoming sseq is behind where we last delivered? */
524 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
525 strm->last_mid_delivered, control->mid);
527 * throw it in the stream so it gets cleaned up in
528 * association destruction
530 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
531 if (asoc->idata_supported) {
532 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
533 strm->last_mid_delivered, control->sinfo_tsn,
534 control->sinfo_stream, control->mid);
536 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
537 (uint16_t)strm->last_mid_delivered,
539 control->sinfo_stream,
540 (uint16_t)control->mid);
542 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
543 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
544 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
549 asoc->size_on_all_streams += control->length;
550 sctp_ucount_incr(asoc->cnt_on_all_streams);
551 nxt_todel = strm->last_mid_delivered + 1;
552 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
553 /* can be delivered right away? */
554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
555 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
557 /* EY it wont be queued if it could be delivered directly */
559 if (asoc->size_on_all_streams >= control->length) {
560 asoc->size_on_all_streams -= control->length;
563 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
565 asoc->size_on_all_streams = 0;
568 sctp_ucount_decr(asoc->cnt_on_all_streams);
569 strm->last_mid_delivered++;
570 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
571 sctp_add_to_readq(stcb->sctp_ep, stcb,
573 &stcb->sctp_socket->so_rcv, 1,
574 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
575 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
577 nxt_todel = strm->last_mid_delivered + 1;
578 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
579 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
580 if (control->on_strm_q == SCTP_ON_ORDERED) {
581 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
582 if (asoc->size_on_all_streams >= control->length) {
583 asoc->size_on_all_streams -= control->length;
586 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
588 asoc->size_on_all_streams = 0;
591 sctp_ucount_decr(asoc->cnt_on_all_streams);
594 panic("Huh control: %p is on_strm_q: %d",
595 control, control->on_strm_q);
598 control->on_strm_q = 0;
599 strm->last_mid_delivered++;
601 * We ignore the return of deliver_data here
602 * since we always can hold the chunk on the
603 * d-queue. And we have a finite number that
604 * can be delivered from the strq.
606 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
607 sctp_log_strm_del(control, NULL,
608 SCTP_STR_LOG_FROM_IMMED_DEL);
610 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
611 sctp_add_to_readq(stcb->sctp_ep, stcb,
613 &stcb->sctp_socket->so_rcv, 1,
614 SCTP_READ_LOCK_NOT_HELD,
617 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
625 * Ok, we did not deliver this guy, find the correct place
626 * to put it on the queue.
628 if (sctp_place_control_in_stream(strm, asoc, control)) {
629 SCTP_SNPRINTF(msg, sizeof(msg),
630 "Queue to str MID: %u duplicate", control->mid);
631 sctp_clean_up_control(stcb, control);
632 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
633 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
634 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
641 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
643 struct mbuf *m, *prev = NULL;
644 struct sctp_tcb *stcb;
646 stcb = control->stcb;
647 control->held_length = 0;
651 if (SCTP_BUF_LEN(m) == 0) {
652 /* Skip mbufs with NO length */
655 control->data = sctp_m_free(m);
658 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
659 m = SCTP_BUF_NEXT(prev);
662 control->tail_mbuf = prev;
667 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
668 if (control->on_read_q) {
670 * On read queue so we must increment the SB stuff,
671 * we assume caller has done any locks of SB.
673 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
675 m = SCTP_BUF_NEXT(m);
678 control->tail_mbuf = prev;
683 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
685 struct mbuf *prev = NULL;
686 struct sctp_tcb *stcb;
688 stcb = control->stcb;
691 panic("Control broken");
696 if (control->tail_mbuf == NULL) {
698 sctp_m_freem(control->data);
700 sctp_setup_tail_pointer(control);
703 control->tail_mbuf->m_next = m;
705 if (SCTP_BUF_LEN(m) == 0) {
706 /* Skip mbufs with NO length */
709 control->tail_mbuf->m_next = sctp_m_free(m);
710 m = control->tail_mbuf->m_next;
712 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
713 m = SCTP_BUF_NEXT(prev);
716 control->tail_mbuf = prev;
721 if (control->on_read_q) {
723 * On read queue so we must increment the SB stuff,
724 * we assume caller has done any locks of SB.
726 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
728 *added += SCTP_BUF_LEN(m);
729 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
730 m = SCTP_BUF_NEXT(m);
733 control->tail_mbuf = prev;
738 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
740 memset(nc, 0, sizeof(struct sctp_queued_to_read));
741 nc->sinfo_stream = control->sinfo_stream;
742 nc->mid = control->mid;
743 TAILQ_INIT(&nc->reasm);
744 nc->top_fsn = control->top_fsn;
745 nc->mid = control->mid;
746 nc->sinfo_flags = control->sinfo_flags;
747 nc->sinfo_ppid = control->sinfo_ppid;
748 nc->sinfo_context = control->sinfo_context;
749 nc->fsn_included = 0xffffffff;
750 nc->sinfo_tsn = control->sinfo_tsn;
751 nc->sinfo_cumtsn = control->sinfo_cumtsn;
752 nc->sinfo_assoc_id = control->sinfo_assoc_id;
753 nc->whoFrom = control->whoFrom;
754 atomic_add_int(&nc->whoFrom->ref_count, 1);
755 nc->stcb = control->stcb;
756 nc->port_from = control->port_from;
757 nc->do_not_ref_stcb = control->do_not_ref_stcb;
761 sctp_reset_a_control(struct sctp_queued_to_read *control,
762 struct sctp_inpcb *inp, uint32_t tsn)
764 control->fsn_included = tsn;
765 if (control->on_read_q) {
767 * We have to purge it from there, hopefully this will work
770 TAILQ_REMOVE(&inp->read_queue, control, next);
771 control->on_read_q = 0;
776 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
777 struct sctp_association *asoc,
778 struct sctp_stream_in *strm,
779 struct sctp_queued_to_read *control,
781 int inp_read_lock_held)
784 * Special handling for the old un-ordered data chunk. All the
785 * chunks/TSN's go to mid 0. So we have to do the old style watching
786 * to see if we have it all. If you return one, no other control
787 * entries on the un-ordered queue will be looked at. In theory
788 * there should be no others entries in reality, unless the guy is
789 * sending both unordered NDATA and unordered DATA...
791 struct sctp_tmit_chunk *chk, *lchk, *tchk;
793 struct sctp_queued_to_read *nc;
796 if (control->first_frag_seen == 0) {
797 /* Nothing we can do, we have not seen the first piece yet */
800 /* Collapse any we can */
803 fsn = control->fsn_included + 1;
804 /* Now what can we add? */
805 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
806 if (chk->rec.data.fsn == fsn) {
808 sctp_alloc_a_readq(stcb, nc);
812 memset(nc, 0, sizeof(struct sctp_queued_to_read));
813 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
814 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
818 if (control->end_added) {
820 if (!TAILQ_EMPTY(&control->reasm)) {
822 * Ok we have to move anything left
823 * on the control queue to a new
826 sctp_build_readq_entry_from_ctl(nc, control);
827 tchk = TAILQ_FIRST(&control->reasm);
828 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
829 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
830 if (asoc->size_on_reasm_queue >= tchk->send_size) {
831 asoc->size_on_reasm_queue -= tchk->send_size;
834 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
836 asoc->size_on_reasm_queue = 0;
839 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
840 nc->first_frag_seen = 1;
841 nc->fsn_included = tchk->rec.data.fsn;
842 nc->data = tchk->data;
843 nc->sinfo_ppid = tchk->rec.data.ppid;
844 nc->sinfo_tsn = tchk->rec.data.tsn;
845 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
847 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
848 sctp_setup_tail_pointer(nc);
849 tchk = TAILQ_FIRST(&control->reasm);
851 /* Spin the rest onto the queue */
853 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
854 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
855 tchk = TAILQ_FIRST(&control->reasm);
858 * Now lets add it to the queue
859 * after removing control
861 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
862 nc->on_strm_q = SCTP_ON_UNORDERED;
863 if (control->on_strm_q) {
864 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
865 control->on_strm_q = 0;
868 if (control->pdapi_started) {
869 strm->pd_api_started = 0;
870 control->pdapi_started = 0;
872 if (control->on_strm_q) {
873 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
874 control->on_strm_q = 0;
875 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
877 if (control->on_read_q == 0) {
878 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
879 &stcb->sctp_socket->so_rcv, control->end_added,
880 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
882 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
883 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
885 * Switch to the new guy and
891 if (nc->on_strm_q == 0) {
892 sctp_free_a_readq(stcb, nc);
897 sctp_free_a_readq(stcb, nc);
904 if (cnt_added && strm->pd_api_started) {
905 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
907 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
908 strm->pd_api_started = 1;
909 control->pdapi_started = 1;
910 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
911 &stcb->sctp_socket->so_rcv, control->end_added,
912 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
913 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
921 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
922 struct sctp_association *asoc,
923 struct sctp_queued_to_read *control,
924 struct sctp_tmit_chunk *chk,
927 struct sctp_tmit_chunk *at;
931 * Here we need to place the chunk into the control structure sorted
932 * in the correct order.
934 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
935 /* Its the very first one. */
936 SCTPDBG(SCTP_DEBUG_XXX,
937 "chunk is a first fsn: %u becomes fsn_included\n",
939 at = TAILQ_FIRST(&control->reasm);
940 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
942 * The first chunk in the reassembly is a smaller
943 * TSN than this one, even though this has a first,
944 * it must be from a subsequent msg.
948 if (control->first_frag_seen) {
950 * In old un-ordered we can reassembly on one
951 * control multiple messages. As long as the next
952 * FIRST is greater then the old first (TSN i.e. FSN
958 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
960 * Easy way the start of a new guy beyond
965 if ((chk->rec.data.fsn == control->fsn_included) ||
966 (control->pdapi_started)) {
968 * Ok this should not happen, if it does we
969 * started the pd-api on the higher TSN
970 * (since the equals part is a TSN failure
973 * We are completly hosed in that case since
974 * I have no way to recover. This really
975 * will only happen if we can get more TSN's
976 * higher before the pd-api-point.
978 sctp_abort_in_reasm(stcb, control, chk,
980 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
985 * Ok we have two firsts and the one we just got is
986 * smaller than the one we previously placed.. yuck!
987 * We must swap them out.
990 tdata = control->data;
991 control->data = chk->data;
993 /* Save the lengths */
994 chk->send_size = control->length;
995 /* Recompute length of control and tail pointer */
996 sctp_setup_tail_pointer(control);
997 /* Fix the FSN included */
998 tmp = control->fsn_included;
999 control->fsn_included = chk->rec.data.fsn;
1000 chk->rec.data.fsn = tmp;
1001 /* Fix the TSN included */
1002 tmp = control->sinfo_tsn;
1003 control->sinfo_tsn = chk->rec.data.tsn;
1004 chk->rec.data.tsn = tmp;
1005 /* Fix the PPID included */
1006 tmp = control->sinfo_ppid;
1007 control->sinfo_ppid = chk->rec.data.ppid;
1008 chk->rec.data.ppid = tmp;
1009 /* Fix tail pointer */
1012 control->first_frag_seen = 1;
1013 control->fsn_included = chk->rec.data.fsn;
1014 control->top_fsn = chk->rec.data.fsn;
1015 control->sinfo_tsn = chk->rec.data.tsn;
1016 control->sinfo_ppid = chk->rec.data.ppid;
1017 control->data = chk->data;
1018 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1020 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1021 sctp_setup_tail_pointer(control);
1026 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1027 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1029 * This one in queue is bigger than the new one,
1030 * insert the new one before at.
1032 asoc->size_on_reasm_queue += chk->send_size;
1033 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1035 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1037 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1039 * They sent a duplicate fsn number. This really
1040 * should not happen since the FSN is a TSN and it
1041 * should have been dropped earlier.
1043 sctp_abort_in_reasm(stcb, control, chk,
1045 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1049 if (inserted == 0) {
1050 /* Its at the end */
1051 asoc->size_on_reasm_queue += chk->send_size;
1052 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1053 control->top_fsn = chk->rec.data.fsn;
1054 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1059 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1060 struct sctp_stream_in *strm, int inp_read_lock_held)
1063 * Given a stream, strm, see if any of the SSN's on it that are
1064 * fragmented are ready to deliver. If so go ahead and place them on
1065 * the read queue. In so placing if we have hit the end, then we
1066 * need to remove them from the stream's queue.
1068 struct sctp_queued_to_read *control, *nctl = NULL;
1069 uint32_t next_to_del;
1073 if (stcb->sctp_socket) {
1074 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1075 stcb->sctp_ep->partial_delivery_point);
1077 pd_point = stcb->sctp_ep->partial_delivery_point;
1079 control = TAILQ_FIRST(&strm->uno_inqueue);
1081 if ((control != NULL) &&
1082 (asoc->idata_supported == 0)) {
1083 /* Special handling needed for "old" data format */
1084 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1088 if (strm->pd_api_started) {
1089 /* Can't add more */
1093 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1094 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1095 nctl = TAILQ_NEXT(control, next_instrm);
1096 if (control->end_added) {
1097 /* We just put the last bit on */
1098 if (control->on_strm_q) {
1100 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1101 panic("Huh control: %p on_q: %d -- not unordered?",
1102 control, control->on_strm_q);
1105 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1106 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1107 if (asoc->size_on_all_streams >= control->length) {
1108 asoc->size_on_all_streams -= control->length;
1111 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1113 asoc->size_on_all_streams = 0;
1116 sctp_ucount_decr(asoc->cnt_on_all_streams);
1117 control->on_strm_q = 0;
1119 if (control->on_read_q == 0) {
1120 sctp_add_to_readq(stcb->sctp_ep, stcb,
1122 &stcb->sctp_socket->so_rcv, control->end_added,
1123 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1126 /* Can we do a PD-API for this un-ordered guy? */
1127 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1128 strm->pd_api_started = 1;
1129 control->pdapi_started = 1;
1130 sctp_add_to_readq(stcb->sctp_ep, stcb,
1132 &stcb->sctp_socket->so_rcv, control->end_added,
1133 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1141 control = TAILQ_FIRST(&strm->inqueue);
1142 if (strm->pd_api_started) {
1143 /* Can't add more */
1146 if (control == NULL) {
1149 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1151 * Ok the guy at the top was being partially delivered
1152 * completed, so we remove it. Note the pd_api flag was
1153 * taken off when the chunk was merged on in
1154 * sctp_queue_data_for_reasm below.
1156 nctl = TAILQ_NEXT(control, next_instrm);
1157 SCTPDBG(SCTP_DEBUG_XXX,
1158 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1159 control, control->end_added, control->mid,
1160 control->top_fsn, control->fsn_included,
1161 strm->last_mid_delivered);
1162 if (control->end_added) {
1163 if (control->on_strm_q) {
1165 if (control->on_strm_q != SCTP_ON_ORDERED) {
1166 panic("Huh control: %p on_q: %d -- not ordered?",
1167 control, control->on_strm_q);
1170 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1171 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1172 if (asoc->size_on_all_streams >= control->length) {
1173 asoc->size_on_all_streams -= control->length;
1176 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1178 asoc->size_on_all_streams = 0;
1181 sctp_ucount_decr(asoc->cnt_on_all_streams);
1182 control->on_strm_q = 0;
1184 if (strm->pd_api_started && control->pdapi_started) {
1185 control->pdapi_started = 0;
1186 strm->pd_api_started = 0;
1188 if (control->on_read_q == 0) {
1189 sctp_add_to_readq(stcb->sctp_ep, stcb,
1191 &stcb->sctp_socket->so_rcv, control->end_added,
1192 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1197 if (strm->pd_api_started) {
1199 * Can't add more must have gotten an un-ordered above being
1200 * partially delivered.
1205 next_to_del = strm->last_mid_delivered + 1;
1207 SCTPDBG(SCTP_DEBUG_XXX,
1208 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1209 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1211 nctl = TAILQ_NEXT(control, next_instrm);
1212 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1213 (control->first_frag_seen)) {
1216 /* Ok we can deliver it onto the stream. */
1217 if (control->end_added) {
1218 /* We are done with it afterwards */
1219 if (control->on_strm_q) {
1221 if (control->on_strm_q != SCTP_ON_ORDERED) {
1222 panic("Huh control: %p on_q: %d -- not ordered?",
1223 control, control->on_strm_q);
1226 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1227 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1228 if (asoc->size_on_all_streams >= control->length) {
1229 asoc->size_on_all_streams -= control->length;
1232 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1234 asoc->size_on_all_streams = 0;
1237 sctp_ucount_decr(asoc->cnt_on_all_streams);
1238 control->on_strm_q = 0;
1242 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1244 * A singleton now slipping through - mark
1245 * it non-revokable too
1247 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1248 } else if (control->end_added == 0) {
1250 * Check if we can defer adding until its
1253 if ((control->length < pd_point) || (strm->pd_api_started)) {
1255 * Don't need it or cannot add more
1256 * (one being delivered that way)
1261 done = (control->end_added) && (control->last_frag_seen);
1262 if (control->on_read_q == 0) {
1264 if (asoc->size_on_all_streams >= control->length) {
1265 asoc->size_on_all_streams -= control->length;
1268 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1270 asoc->size_on_all_streams = 0;
1273 strm->pd_api_started = 1;
1274 control->pdapi_started = 1;
1276 sctp_add_to_readq(stcb->sctp_ep, stcb,
1278 &stcb->sctp_socket->so_rcv, control->end_added,
1279 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1281 strm->last_mid_delivered = next_to_del;
/*
 * Merge the data of one reassembly fragment (chk) onto the tail of the
 * partially reassembled message (control) and release the chunk descriptor.
 * NOTE(review): this listing is elided (embedded line numbers jump), so
 * several braces/else-arms and the return path are not visible; the comments
 * below describe only the statements that are visible here.
 */
1293 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1294 struct sctp_stream_in *strm,
1295 struct sctp_tcb *stcb, struct sctp_association *asoc,
1296 struct sctp_tmit_chunk *chk, int hold_rlock)
1299 * Given a control and a chunk, merge the data from the chk onto the
1300 * control and free up the chunk resources.
/*
 * If the reader can already see this message (on_read_q) and the caller
 * does not hold the INP read lock, take it before mutating the mbuf chain.
 */
1305 if (control->on_read_q && (hold_rlock == 0)) {
1307 * Its being pd-api'd so we must do some locks.
1309 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First data merged: the control adopts the chunk's mbuf chain outright. */
1312 if (control->data == NULL) {
1313 control->data = chk->data;
1314 sctp_setup_tail_pointer(control);
/* Otherwise append the chunk's mbufs at the cached tail (tracks bytes added). */
1316 sctp_add_to_tail_pointer(control, chk->data, &added);
/* Data moved into control: advance FSN and fix reassembly-queue accounting. */
1318 control->fsn_included = chk->rec.data.fsn;
1319 asoc->size_on_reasm_queue -= chk->send_size;
1320 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1321 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* The FIRST fragment carries the message's TSN and PPID. */
1323 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1324 control->first_frag_seen = 1;
1325 control->sinfo_tsn = chk->rec.data.tsn;
1326 control->sinfo_ppid = chk->rec.data.ppid;
/*
 * LAST fragment: the message is now complete.  If it sits on both a
 * stream queue and the read queue, close out any partial-delivery (PD-API)
 * session and unlink it from the stream queue it is on.
 */
1328 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1330 if ((control->on_strm_q) && (control->on_read_q)) {
1331 if (control->pdapi_started) {
1332 control->pdapi_started = 0;
1333 strm->pd_api_started = 0;
1335 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1337 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1338 control->on_strm_q = 0;
1339 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1341 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1343 * Don't need to decrement
1344 * size_on_all_streams, since control is on
1347 sctp_ucount_decr(asoc->cnt_on_all_streams);
1348 control->on_strm_q = 0;
/*
 * Any other on_strm_q value is corrupted state; panic here
 * (presumably under an elided #ifdef INVARIANTS guard — confirm).
 */
1350 } else if (control->on_strm_q) {
1351 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1352 control->on_strm_q);
/* Mark the message finished so the reader knows no more data is coming. */
1356 control->end_added = 1;
1357 control->last_frag_seen = 1;
1360 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* The mbufs were transferred above, so only the chunk descriptor is freed. */
1362 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1367 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1368 * queue, see if anthing can be delivered. If so pull it off (or as much as
1369 * we can. If we run out of space then we must dump what we can and set the
1370 * appropriate flag to say we queued what we could.
/*
 * Insert a fragment (chk) into the reassembly state for its message
 * (control), enforcing FSN ordering invariants (abort the association via
 * sctp_abort_in_reasm on protocol violations), then pull any now-in-order
 * fragments from the reasm list into the control and wake the reader.
 * NOTE(review): this listing is elided (embedded line numbers jump), so
 * missing braces/else-arms/returns are not visible; comments below cover
 * only the visible statements.
 */
1373 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1374 struct sctp_queued_to_read *control,
1375 struct sctp_tmit_chunk *chk,
1376 int created_control,
1377 int *abort_flag, uint32_t tsn)
1380 struct sctp_tmit_chunk *at, *nat;
1381 struct sctp_stream_in *strm;
1382 int do_wakeup, unordered;
1385 strm = &asoc->strmin[control->sinfo_stream];
1387 * For old un-ordered data chunks.
/* sinfo_flags carries the wire chunk flags in its upper byte. */
1389 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1394 /* Must be added to the stream-in queue */
1395 if (created_control) {
/* Old-format unordered messages are not counted on the stream queues. */
1396 if ((unordered == 0) || (asoc->idata_supported)) {
1397 sctp_ucount_incr(asoc->cnt_on_all_streams);
/* Non-zero return means the SSN/MID already exists in the stream queue. */
1399 if (sctp_place_control_in_stream(strm, asoc, control)) {
1400 /* Duplicate SSN? */
1401 sctp_abort_in_reasm(stcb, control, chk,
1403 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1404 sctp_clean_up_control(stcb, control);
/*
 * A fragment landing exactly at cum-ack+1 (old DATA only) must carry the
 * B bit; otherwise the start of this message can never legally arrive.
 */
1407 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1409 * Ok we created this control and now lets validate
1410 * that its legal i.e. there is a B bit set, if not
1411 * and we have up to the cum-ack then its invalid.
1413 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1414 sctp_abort_in_reasm(stcb, control, chk,
1416 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old-format unordered fragments take a dedicated insertion path. */
1421 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1422 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1426 * Ok we must queue the chunk into the reasembly portion: o if its
1427 * the first it goes to the control mbuf. o if its not first but the
1428 * next in sequence it goes to the control, and each succeeding one
1429 * in order also goes. o if its not in order we place it on the list
1432 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1433 /* Its the very first one. */
1434 SCTPDBG(SCTP_DEBUG_XXX,
1435 "chunk is a first fsn: %u becomes fsn_included\n",
/* A second FIRST fragment for the same message is a sender protocol error. */
1437 if (control->first_frag_seen) {
1439 * Error on senders part, they either sent us two
1440 * data chunks with FIRST, or they sent two
1441 * un-ordered chunks that were fragmented at the
1442 * same time in the same stream.
1444 sctp_abort_in_reasm(stcb, control, chk,
1446 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* Adopt the FIRST fragment's data, TSN, PPID and FSN into the control. */
1449 control->first_frag_seen = 1;
1450 control->sinfo_ppid = chk->rec.data.ppid;
1451 control->sinfo_tsn = chk->rec.data.tsn;
1452 control->fsn_included = chk->rec.data.fsn;
1453 control->data = chk->data;
1454 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1456 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1457 sctp_setup_tail_pointer(control);
1458 asoc->size_on_all_streams += control->length;
1460 /* Place the chunk in our list */
/* Until the LAST fragment arrives, keep raising the highest FSN seen. */
1463 if (control->last_frag_seen == 0) {
1464 /* Still willing to raise highest FSN seen */
1465 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1466 SCTPDBG(SCTP_DEBUG_XXX,
1467 "We have a new top_fsn: %u\n",
1469 control->top_fsn = chk->rec.data.fsn;
1471 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1472 SCTPDBG(SCTP_DEBUG_XXX,
1473 "The last fsn is now in place fsn: %u\n",
1475 control->last_frag_seen = 1;
/* The LAST fragment must carry the highest FSN observed so far. */
1476 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1477 SCTPDBG(SCTP_DEBUG_XXX,
1478 "New fsn: %u is not at top_fsn: %u -- abort\n",
1481 sctp_abort_in_reasm(stcb, control, chk,
1483 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1487 if (asoc->idata_supported || control->first_frag_seen) {
1489 * For IDATA we always check since we know
1490 * that the first fragment is 0. For old
1491 * DATA we have to receive the first before
1492 * we know the first FSN (which is the TSN).
/* Fragment at or below the already-delivered FSN: duplicate, abort. */
1494 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1496 * We have already delivered up to
1499 sctp_abort_in_reasm(stcb, control, chk,
1501 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
/* LAST was already recorded; a second LAST fragment is illegal. */
1506 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1507 /* Second last? huh? */
1508 SCTPDBG(SCTP_DEBUG_XXX,
1509 "Duplicate last fsn: %u (top: %u) -- abort\n",
1510 chk->rec.data.fsn, control->top_fsn);
1511 sctp_abort_in_reasm(stcb, control,
1513 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1516 if (asoc->idata_supported || control->first_frag_seen) {
1518 * For IDATA we always check since we know
1519 * that the first fragment is 0. For old
1520 * DATA we have to receive the first before
1521 * we know the first FSN (which is the TSN).
1524 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1526 * We have already delivered up to
1529 SCTPDBG(SCTP_DEBUG_XXX,
1530 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1531 chk->rec.data.fsn, control->fsn_included);
1532 sctp_abort_in_reasm(stcb, control, chk,
1534 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1539 * validate not beyond top FSN if we have seen last
/* Once LAST is known, no fragment may carry an FSN beyond top_fsn. */
1542 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1543 SCTPDBG(SCTP_DEBUG_XXX,
1544 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1547 sctp_abort_in_reasm(stcb, control, chk,
1549 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1554 * If we reach here, we need to place the new chunk in the
1555 * reassembly for this control.
1557 SCTPDBG(SCTP_DEBUG_XXX,
1558 "chunk is a not first fsn: %u needs to be inserted\n",
/* Walk the FSN-sorted reasm list and insert the chunk in order. */
1560 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1561 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
/* A LAST fragment that is not the largest FSN cannot be valid. */
1562 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1563 /* Last not at the end? huh? */
1564 SCTPDBG(SCTP_DEBUG_XXX,
1565 "Last fragment not last in list: -- abort\n");
1566 sctp_abort_in_reasm(stcb, control,
1568 SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1572 * This one in queue is bigger than the new
1573 * one, insert the new one before at.
1575 SCTPDBG(SCTP_DEBUG_XXX,
1576 "Insert it before fsn: %u\n",
1578 asoc->size_on_reasm_queue += chk->send_size;
1579 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1580 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
/* Same FSN already queued: treated as a protocol violation, abort. */
1583 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1585 * Gak, He sent me a duplicate str seq
1589 * foo bar, I guess I will just free this
1590 * new guy, should we abort too? FIX ME
1591 * MAYBE? Or it COULD be that the SSN's have
1592 * wrapped. Maybe I should compare to TSN
1593 * somehow... sigh for now just blow away
1596 SCTPDBG(SCTP_DEBUG_XXX,
1597 "Duplicate to fsn: %u -- abort\n",
1599 sctp_abort_in_reasm(stcb, control,
1601 SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
/* Largest FSN so far: append at the tail of the reasm list. */
1605 if (inserted == 0) {
1606 /* Goes on the end */
1607 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1609 asoc->size_on_reasm_queue += chk->send_size;
1610 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1611 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1615 * Ok lets see if we can suck any up into the control structure that
1616 * are in seq if it makes sense.
1620 * If the first fragment has not been seen there is no sense in
/* Drain consecutive FSNs (fsn_included+1, +2, ...) into the control. */
1623 if (control->first_frag_seen) {
1624 next_fsn = control->fsn_included + 1;
1625 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1626 if (at->rec.data.fsn == next_fsn) {
1627 /* We can add this one now to the control */
1628 SCTPDBG(SCTP_DEBUG_XXX,
1629 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1632 next_fsn, control->fsn_included);
1633 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1634 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1635 if (control->on_read_q) {
1639 * We only add to the
1640 * size-on-all-streams if its not on
1641 * the read q. The read q flag will
1642 * cause a sballoc so its accounted
1645 asoc->size_on_all_streams += lenadded;
/* Message completed while a PD-API was active: close out the PD-API. */
1648 if (control->end_added && control->pdapi_started) {
1649 if (strm->pd_api_started) {
1650 strm->pd_api_started = 0;
1651 control->pdapi_started = 0;
/* Complete and not yet visible to the reader: hand it to the read queue. */
1653 if (control->on_read_q == 0) {
1654 sctp_add_to_readq(stcb->sctp_ep, stcb,
1656 &stcb->sctp_socket->so_rcv, control->end_added,
1657 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1667 /* Need to wakeup the reader */
1668 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Look up the reassembly control for message id `mid` on stream `strm`.
 * Scans the ordered in-queue by MID/SSN; with I-DATA support the unordered
 * queue is also keyed by MID, otherwise the head of the unordered queue is
 * taken.  NOTE(review): this listing is elided (embedded line numbers jump);
 * the use of the `ordered` parameter and the return statements fall in the
 * elided lines — confirm against the full source.
 */
1672 static struct sctp_queued_to_read *
1673 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1675 struct sctp_queued_to_read *control;
/* Search the ordered in-queue for a matching MID (SSN for old DATA). */
1678 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1679 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1684 if (idata_supported) {
/* I-DATA: unordered messages carry MIDs, so match on MID as well. */
1685 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1686 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Old DATA: unordered chunks have no usable SSN; take the queue head. */
1691 control = TAILQ_FIRST(&strm->uno_inqueue);
1698 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1699 struct mbuf **m, int offset, int chk_length,
1700 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1701 int *break_flag, int last_chunk, uint8_t chk_type)
1703 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1704 struct sctp_stream_in *strm;
1705 uint32_t tsn, fsn, gap, mid;
1708 int need_reasm_check = 0;
1710 struct mbuf *op_err;
1711 char msg[SCTP_DIAG_INFO_LEN];
1712 struct sctp_queued_to_read *control, *ncontrol;
1715 struct sctp_stream_reset_list *liste;
1718 int created_control = 0;
1720 if (chk_type == SCTP_IDATA) {
1721 struct sctp_idata_chunk *chunk, chunk_buf;
1723 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1724 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1725 chk_flags = chunk->ch.chunk_flags;
1726 clen = sizeof(struct sctp_idata_chunk);
1727 tsn = ntohl(chunk->dp.tsn);
1728 sid = ntohs(chunk->dp.sid);
1729 mid = ntohl(chunk->dp.mid);
1730 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1732 ppid = chunk->dp.ppid_fsn.ppid;
1734 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1735 ppid = 0xffffffff; /* Use as an invalid value. */
1738 struct sctp_data_chunk *chunk, chunk_buf;
1740 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1741 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1742 chk_flags = chunk->ch.chunk_flags;
1743 clen = sizeof(struct sctp_data_chunk);
1744 tsn = ntohl(chunk->dp.tsn);
1745 sid = ntohs(chunk->dp.sid);
1746 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1748 ppid = chunk->dp.ppid;
1750 if ((size_t)chk_length == clen) {
1752 * Need to send an abort since we had a empty data chunk.
1754 op_err = sctp_generate_no_user_data_cause(tsn);
1755 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1756 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1760 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1761 asoc->send_sack = 1;
1763 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1765 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1770 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1771 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1772 /* It is a duplicate */
1773 SCTP_STAT_INCR(sctps_recvdupdata);
1774 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1775 /* Record a dup for the next outbound sack */
1776 asoc->dup_tsns[asoc->numduptsns] = tsn;
1779 asoc->send_sack = 1;
1782 /* Calculate the number of TSN's between the base and this TSN */
1783 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1784 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1785 /* Can't hold the bit in the mapping at max array, toss it */
1788 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1789 SCTP_TCB_LOCK_ASSERT(stcb);
1790 if (sctp_expand_mapping_array(asoc, gap)) {
1791 /* Can't expand, drop it */
1795 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1798 /* See if we have received this one already */
1799 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1800 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1801 SCTP_STAT_INCR(sctps_recvdupdata);
1802 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1803 /* Record a dup for the next outbound sack */
1804 asoc->dup_tsns[asoc->numduptsns] = tsn;
1807 asoc->send_sack = 1;
1811 * Check to see about the GONE flag, duplicates would cause a sack
1812 * to be sent up above
1814 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1815 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1816 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1818 * wait a minute, this guy is gone, there is no longer a
1819 * receiver. Send peer an ABORT!
1821 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1822 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1827 * Now before going further we see if there is room. If NOT then we
1828 * MAY let one through only IF this TSN is the one we are waiting
1829 * for on a partial delivery API.
1832 /* Is the stream valid? */
1833 if (sid >= asoc->streamincnt) {
1834 struct sctp_error_invalid_stream *cause;
1836 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1837 0, M_NOWAIT, 1, MT_DATA);
1838 if (op_err != NULL) {
1839 /* add some space up front so prepend will work well */
1840 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1841 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1843 * Error causes are just param's and this one has
1844 * two back to back phdr, one with the error type
1845 * and size, the other with the streamid and a rsvd
1847 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1848 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1849 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1850 cause->stream_id = htons(sid);
1851 cause->reserved = htons(0);
1852 sctp_queue_op_err(stcb, op_err);
1854 SCTP_STAT_INCR(sctps_badsid);
1855 SCTP_TCB_LOCK_ASSERT(stcb);
1856 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1857 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1858 asoc->highest_tsn_inside_nr_map = tsn;
1860 if (tsn == (asoc->cumulative_tsn + 1)) {
1861 /* Update cum-ack */
1862 asoc->cumulative_tsn = tsn;
1867 * If its a fragmented message, lets see if we can find the control
1868 * on the reassembly queues.
1870 if ((chk_type == SCTP_IDATA) &&
1871 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1874 * The first *must* be fsn 0, and other (middle/end) pieces
1875 * can *not* be fsn 0. XXX: This can happen in case of a
1876 * wrap around. Ignore is for now.
1878 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1881 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1882 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1883 chk_flags, control);
1884 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1885 /* See if we can find the re-assembly entity */
1886 if (control != NULL) {
1887 /* We found something, does it belong? */
1888 if (ordered && (mid != control->mid)) {
1889 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1891 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1892 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1893 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1897 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1899 * We can't have a switched order with an
1902 SCTP_SNPRINTF(msg, sizeof(msg),
1903 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1907 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1909 * We can't have a switched unordered with a
1912 SCTP_SNPRINTF(msg, sizeof(msg),
1913 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1920 * Its a complete segment. Lets validate we don't have a
1921 * re-assembly going on with the same Stream/Seq (for
1922 * ordered) or in the same Stream for unordered.
1924 if (control != NULL) {
1925 if (ordered || asoc->idata_supported) {
1926 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1928 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1931 if ((tsn == control->fsn_included + 1) &&
1932 (control->end_added == 0)) {
1933 SCTP_SNPRINTF(msg, sizeof(msg),
1934 "Illegal message sequence, missing end for MID: %8.8x",
1935 control->fsn_included);
1943 /* now do the tests */
1944 if (((asoc->cnt_on_all_streams +
1945 asoc->cnt_on_reasm_queue +
1946 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1947 (((int)asoc->my_rwnd) <= 0)) {
1949 * When we have NO room in the rwnd we check to make sure
1950 * the reader is doing its job...
1952 if (stcb->sctp_socket->so_rcv.sb_cc) {
1953 /* some to read, wake-up */
1954 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1956 /* now is it in the mapping array of what we have accepted? */
1957 if (chk_type == SCTP_DATA) {
1958 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1959 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1960 /* Nope not in the valid range dump it */
1962 sctp_set_rwnd(stcb, asoc);
1963 if ((asoc->cnt_on_all_streams +
1964 asoc->cnt_on_reasm_queue +
1965 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1966 SCTP_STAT_INCR(sctps_datadropchklmt);
1968 SCTP_STAT_INCR(sctps_datadroprwnd);
1974 if (control == NULL) {
1977 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1982 #ifdef SCTP_ASOCLOG_OF_TSNS
1983 SCTP_TCB_LOCK_ASSERT(stcb);
1984 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1985 asoc->tsn_in_at = 0;
1986 asoc->tsn_in_wrapped = 1;
1988 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1989 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1990 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1991 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1992 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1993 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1994 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1995 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1999 * Before we continue lets validate that we are not being fooled by
2000 * an evil attacker. We can only have Nk chunks based on our TSN
2001 * spread allowed by the mapping array N * 8 bits, so there is no
2002 * way our stream sequence numbers could have wrapped. We of course
2003 * only validate the FIRST fragment so the bit must be set.
2005 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2006 (TAILQ_EMPTY(&asoc->resetHead)) &&
2007 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2008 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2009 /* The incoming sseq is behind where we last delivered? */
2010 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2011 mid, asoc->strmin[sid].last_mid_delivered);
2013 if (asoc->idata_supported) {
2014 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2015 asoc->strmin[sid].last_mid_delivered,
2020 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2021 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2026 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2027 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2028 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2032 if (chk_type == SCTP_IDATA) {
2033 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2035 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2037 if (last_chunk == 0) {
2038 if (chk_type == SCTP_IDATA) {
2039 dmbuf = SCTP_M_COPYM(*m,
2040 (offset + sizeof(struct sctp_idata_chunk)),
2043 dmbuf = SCTP_M_COPYM(*m,
2044 (offset + sizeof(struct sctp_data_chunk)),
2047 #ifdef SCTP_MBUF_LOGGING
2048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2049 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2053 /* We can steal the last chunk */
2057 /* lop off the top part */
2058 if (chk_type == SCTP_IDATA) {
2059 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2061 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2063 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2064 l_len = SCTP_BUF_LEN(dmbuf);
2067 * need to count up the size hopefully does not hit
2073 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2074 l_len += SCTP_BUF_LEN(lat);
2077 if (l_len > the_len) {
2078 /* Trim the end round bytes off too */
2079 m_adj(dmbuf, -(l_len - the_len));
2082 if (dmbuf == NULL) {
2083 SCTP_STAT_INCR(sctps_nomem);
2087 * Now no matter what, we need a control, get one if we don't have
2088 * one (we may have gotten it above when we found the message was
2091 if (control == NULL) {
2092 sctp_alloc_a_readq(stcb, control);
2093 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2098 if (control == NULL) {
2099 SCTP_STAT_INCR(sctps_nomem);
2102 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2105 control->data = dmbuf;
2106 control->tail_mbuf = NULL;
2107 for (mm = control->data; mm; mm = mm->m_next) {
2108 control->length += SCTP_BUF_LEN(mm);
2109 if (SCTP_BUF_NEXT(mm) == NULL) {
2110 control->tail_mbuf = mm;
2113 control->end_added = 1;
2114 control->last_frag_seen = 1;
2115 control->first_frag_seen = 1;
2116 control->fsn_included = fsn;
2117 control->top_fsn = fsn;
2119 created_control = 1;
2121 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2122 chk_flags, ordered, mid, control);
2123 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2124 TAILQ_EMPTY(&asoc->resetHead) &&
2126 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2127 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2128 /* Candidate for express delivery */
2130 * Its not fragmented, No PD-API is up, Nothing in the
2131 * delivery queue, Its un-ordered OR ordered and the next to
2132 * deliver AND nothing else is stuck on the stream queue,
2133 * And there is room for it in the socket buffer. Lets just
2134 * stuff it up the buffer....
2136 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2137 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2138 asoc->highest_tsn_inside_nr_map = tsn;
2140 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2143 sctp_add_to_readq(stcb->sctp_ep, stcb,
2144 control, &stcb->sctp_socket->so_rcv,
2145 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2147 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2148 /* for ordered, bump what we delivered */
2149 asoc->strmin[sid].last_mid_delivered++;
2151 SCTP_STAT_INCR(sctps_recvexpress);
2152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2153 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2154 SCTP_STR_LOG_FROM_EXPRS_DEL);
2157 goto finish_express_del;
2160 /* Now will we need a chunk too? */
2161 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2162 sctp_alloc_a_chunk(stcb, chk);
2164 /* No memory so we drop the chunk */
2165 SCTP_STAT_INCR(sctps_nomem);
2166 if (last_chunk == 0) {
2167 /* we copied it, free the copy */
2168 sctp_m_freem(dmbuf);
2172 chk->rec.data.tsn = tsn;
2173 chk->no_fr_allowed = 0;
2174 chk->rec.data.fsn = fsn;
2175 chk->rec.data.mid = mid;
2176 chk->rec.data.sid = sid;
2177 chk->rec.data.ppid = ppid;
2178 chk->rec.data.context = stcb->asoc.context;
2179 chk->rec.data.doing_fast_retransmit = 0;
2180 chk->rec.data.rcv_flags = chk_flags;
2182 chk->send_size = the_len;
2184 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2187 atomic_add_int(&net->ref_count, 1);
2190 /* Set the appropriate TSN mark */
2191 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2192 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2193 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2194 asoc->highest_tsn_inside_nr_map = tsn;
2197 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2198 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2199 asoc->highest_tsn_inside_map = tsn;
2202 /* Now is it complete (i.e. not fragmented)? */
2203 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2205 * Special check for when streams are resetting. We could be
2206 * more smart about this and check the actual stream to see
2207 * if it is not being reset.. that way we would not create a
2208 * HOLB when amongst streams being reset and those not being
2212 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2213 SCTP_TSN_GT(tsn, liste->tsn)) {
2215 * yep its past where we need to reset... go ahead
2218 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2220 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2222 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2223 unsigned char inserted = 0;
2225 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2226 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2230 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2235 if (inserted == 0) {
2237 * must be put at end, use prevP
2238 * (all setup from loop) to setup
2241 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2244 goto finish_express_del;
2246 if (chk_flags & SCTP_DATA_UNORDERED) {
2247 /* queue directly into socket buffer */
2248 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2250 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2251 sctp_add_to_readq(stcb->sctp_ep, stcb,
2253 &stcb->sctp_socket->so_rcv, 1,
2254 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2257 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2259 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2267 goto finish_express_del;
2269 /* If we reach here its a reassembly */
2270 need_reasm_check = 1;
2271 SCTPDBG(SCTP_DEBUG_XXX,
2272 "Queue data to stream for reasm control: %p MID: %u\n",
2274 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2277 * the assoc is now gone and chk was put onto the reasm
2278 * queue, which has all been freed.
2286 /* Here we tidy up things */
2287 if (tsn == (asoc->cumulative_tsn + 1)) {
2288 /* Update cum-ack */
2289 asoc->cumulative_tsn = tsn;
2295 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2297 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2299 SCTP_STAT_INCR(sctps_recvdata);
2300 /* Set it present please */
2301 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2302 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2304 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2305 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2306 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2308 if (need_reasm_check) {
2309 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2310 need_reasm_check = 0;
2312 /* check the special flag for stream resets */
2313 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2314 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2316 * we have finished working through the backlogged TSN's now
2317 * time to reset streams. 1: call reset function. 2: free
2318 * pending_reply space 3: distribute any chunks in
2319 * pending_reply_queue.
2321 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2322 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2323 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2324 SCTP_FREE(liste, SCTP_M_STRESET);
2325 /* sa_ignore FREED_MEMORY */
2326 liste = TAILQ_FIRST(&asoc->resetHead);
2327 if (TAILQ_EMPTY(&asoc->resetHead)) {
2328 /* All can be removed */
2329 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2330 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2331 strm = &asoc->strmin[control->sinfo_stream];
2332 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2336 if (need_reasm_check) {
2337 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2338 need_reasm_check = 0;
2342 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2343 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2347 * if control->sinfo_tsn is <= liste->tsn we
2348 * can process it which is the NOT of
2349 * control->sinfo_tsn > liste->tsn
2351 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2352 strm = &asoc->strmin[control->sinfo_stream];
2353 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2357 if (need_reasm_check) {
2358 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2359 need_reasm_check = 0;
/*
 * Lookup table indexed by one byte of the OR of the mapping arrays.
 * For a byte value v, sctp_map_lookup_tab[v] is the number of
 * consecutive 1-bits starting at the least-significant bit
 * (tab[0x00] == 0, tab[0x01] == 1, tab[0x03] == 2, ..., tab[0xff] == 8),
 * i.e. how many further in-order TSNs that byte contributes when
 * scanning for the cumulative-ack point (see the `at +=
 * sctp_map_lookup_tab[val]` use in the slide routine below).
 */
2367 static const int8_t sctp_map_lookup_tab[256] = {
2368 0, 1, 0, 2, 0, 1, 0, 3,
2369 0, 1, 0, 2, 0, 1, 0, 4,
2370 0, 1, 0, 2, 0, 1, 0, 3,
2371 0, 1, 0, 2, 0, 1, 0, 5,
2372 0, 1, 0, 2, 0, 1, 0, 3,
2373 0, 1, 0, 2, 0, 1, 0, 4,
2374 0, 1, 0, 2, 0, 1, 0, 3,
2375 0, 1, 0, 2, 0, 1, 0, 6,
2376 0, 1, 0, 2, 0, 1, 0, 3,
2377 0, 1, 0, 2, 0, 1, 0, 4,
2378 0, 1, 0, 2, 0, 1, 0, 3,
2379 0, 1, 0, 2, 0, 1, 0, 5,
2380 0, 1, 0, 2, 0, 1, 0, 3,
2381 0, 1, 0, 2, 0, 1, 0, 4,
2382 0, 1, 0, 2, 0, 1, 0, 3,
2383 0, 1, 0, 2, 0, 1, 0, 7,
2384 0, 1, 0, 2, 0, 1, 0, 3,
2385 0, 1, 0, 2, 0, 1, 0, 4,
2386 0, 1, 0, 2, 0, 1, 0, 3,
2387 0, 1, 0, 2, 0, 1, 0, 5,
2388 0, 1, 0, 2, 0, 1, 0, 3,
2389 0, 1, 0, 2, 0, 1, 0, 4,
2390 0, 1, 0, 2, 0, 1, 0, 3,
2391 0, 1, 0, 2, 0, 1, 0, 6,
2392 0, 1, 0, 2, 0, 1, 0, 3,
2393 0, 1, 0, 2, 0, 1, 0, 4,
2394 0, 1, 0, 2, 0, 1, 0, 3,
2395 0, 1, 0, 2, 0, 1, 0, 5,
2396 0, 1, 0, 2, 0, 1, 0, 3,
2397 0, 1, 0, 2, 0, 1, 0, 4,
2398 0, 1, 0, 2, 0, 1, 0, 3,
2399 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Recompute the cumulative TSN from the OR of nr_mapping_array and
 * mapping_array, then either wipe both arrays (everything through the
 * highest TSN is acked) or slide their live tail down to the front and
 * advance mapping_array_base_tsn.
 *
 * NOTE(review): this extract is missing some of the original lines
 * (several closing braces and at least one branch of the scan loop);
 * comments below describe only the code that is visible here.
 */
2403 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2406 * Now we also need to check the mapping array in a couple of ways.
2407 * 1) Did we move the cum-ack point?
2409 * When you first glance at this you might think that all entries
2410 * that make up the position of the cum-ack would be in the
2411 * nr-mapping array only.. i.e. things up to the cum-ack are always
2412 * deliverable. Thats true with one exception, when its a fragmented
2413 * message we may not deliver the data until some threshold (or all
2414 * of it) is in place. So we must OR the nr_mapping_array and
2415 * mapping_array to get a true picture of the cum-ack.
2417 struct sctp_association *asoc;
2420 int slide_from, slide_end, lgap, distance;
2421 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot pre-slide state; used only for SCTP_MAP logging below. */
2425 old_cumack = asoc->cumulative_tsn;
2426 old_base = asoc->mapping_array_base_tsn;
2427 old_highest = asoc->highest_tsn_inside_map;
2429 * We could probably improve this a small bit by calculating the
2430 * offset of the current cum-ack as the starting point.
/*
 * Byte-wise scan of the OR'ed arrays; 'at' accumulates the run of
 * consecutive 1-bits (in-order TSNs) via sctp_map_lookup_tab, and
 * slide_from is left at the first byte that is not all-ones.
 */
2433 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2434 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2438 /* there is a 0 bit */
2439 at += sctp_map_lookup_tab[val];
/* New cum-ack = base TSN plus the number of in-order bits found. */
2443 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity check: cum-ack may never pass BOTH highest-TSN trackers. */
2445 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2446 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2448 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2449 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2451 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2452 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2453 sctp_print_mapping_array(asoc);
2454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2455 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery: clamp both trackers to the new cum-ack. */
2457 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2458 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the two trackers (serial-number order). */
2461 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2462 highest_tsn = asoc->highest_tsn_inside_nr_map;
2464 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything up to the highest TSN is acked and at least one
 * whole byte was covered -> clear the arrays and restart the base.
 */
2466 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2467 /* The complete array was completed by a single FR */
2468 /* highest becomes the cum-ack */
2474 /* clear the array */
/* Bits -> bytes, rounded up; never past the array end. */
2475 clr = ((at + 7) >> 3);
2476 if (clr > asoc->mapping_array_size) {
2477 clr = asoc->mapping_array_size;
2479 memset(asoc->mapping_array, 0, clr);
2480 memset(asoc->nr_mapping_array, 0, clr);
/* Debug pass: verify both arrays really are clean after the clear. */
2482 for (i = 0; i < asoc->mapping_array_size; i++) {
2483 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2484 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2485 sctp_print_mapping_array(asoc);
2489 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2490 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/* Case 2: at least one whole byte acked -> slide instead of clear. */
2491 } else if (at >= 8) {
2492 /* we can slide the mapping array down */
2493 /* slide_from holds where we hit the first NON 0xff byte */
2496 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn from the array base TSN. */
2499 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2500 slide_end = (lgap >> 3);
2501 if (slide_end < slide_from) {
2502 sctp_print_mapping_array(asoc);
2504 panic("impossible slide");
2506 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2507 lgap, slide_end, slide_from, at);
2511 if (slide_end > asoc->mapping_array_size) {
2513 panic("would overrun buffer");
2515 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2516 asoc->mapping_array_size, slide_end);
2517 slide_end = asoc->mapping_array_size;
2520 distance = (slide_end - slide_from) + 1;
2521 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2522 sctp_log_map(old_base, old_cumack, old_highest,
2523 SCTP_MAP_PREPARE_SLIDE);
2524 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2525 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2527 if (distance + slide_from > asoc->mapping_array_size ||
2530 * Here we do NOT slide forward the array so that
2531 * hopefully when more data comes in to fill it up
2532 * we will be able to slide it forward. Really I
2533 * don't think this should happen :-0
2535 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2536 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2537 (uint32_t)asoc->mapping_array_size,
2538 SCTP_MAP_SLIDE_NONE);
/* Copy the live tail down to the front of both arrays... */
2543 for (ii = 0; ii < distance; ii++) {
2544 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2545 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ...and zero what is left above the copied region. */
2547 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2548 asoc->mapping_array[ii] = 0;
2549 asoc->nr_mapping_array[ii] = 0;
/*
 * Preserve the "empty map" invariant (highest == base - 1) for
 * each tracker before advancing the base by slide_from bytes
 * (8 TSNs per byte, hence the << 3).
 */
2551 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2552 asoc->highest_tsn_inside_map += (slide_from << 3);
2554 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2555 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2557 asoc->mapping_array_base_tsn += (slide_from << 3);
2558 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2559 sctp_log_map(asoc->mapping_array_base_tsn,
2560 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2561 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to emit a SACK immediately or (re)start the delayed
 * ack timer after a batch of DATA chunks has been processed.
 * was_a_gap: non-zero if a gap existed before this packet was handled.
 * In SHUTDOWN-SENT state the SACK timer is stopped and a SHUTDOWN plus
 * an immediate SACK are sent instead.
 *
 * NOTE(review): some original lines (closing braces, comment openers)
 * are missing from this extract; comments describe visible code only.
 */
2568 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2570 struct sctp_association *asoc;
2571 uint32_t highest_tsn;
/* First bring the mapping arrays / cum-ack up to date. */
2574 sctp_slide_mapping_arrays(stcb);
2576 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2577 highest_tsn = asoc->highest_tsn_inside_nr_map;
2579 highest_tsn = asoc->highest_tsn_inside_map;
2581 /* Is there a gap now? */
2582 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2585 * Now we need to see if we need to queue a sack or just start the
2586 * timer (if allowed).
2588 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2590 * Ok special case, in SHUTDOWN-SENT case. here we maker
2591 * sure SACK timer is off and instead send a SHUTDOWN and a
2594 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2595 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2596 stcb->sctp_ep, stcb, NULL,
2597 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
/* Send SHUTDOWN to the alternate path if one exists, else primary. */
2599 sctp_send_shutdown(stcb,
2600 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2602 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2606 * CMT DAC algorithm: increase number of packets received
2609 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any one of these conditions forces a SACK (or DAC-delayed ack). */
2611 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2613 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2615 (stcb->asoc.numduptsns) || /* we have dup's */
2616 (is_a_gap) || /* is still a gap */
2617 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2618 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
/* CMT/DAC exception: delay the ack despite the gap conditions. */
2619 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2620 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2621 (stcb->asoc.send_sack == 0) &&
2622 (stcb->asoc.numduptsns == 0) &&
2623 (stcb->asoc.delayed_ack) &&
2624 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2626 * CMT DAC algorithm: With CMT, delay acks
2627 * even in the face of reordering.
2628 * Therefore, if acks that do not have to be
2629 * sent because of the above reasons, will
2630 * be delayed. That is, acks that would have
2631 * been sent due to gap reports will be
2632 * delayed with DAC. Start the delayed ack
2635 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2636 stcb->sctp_ep, stcb, NULL);
2639 * Ok we must build a SACK since the timer
2640 * is pending, we got our first packet OR
2641 * there are gaps or duplicates.
2643 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2644 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2645 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise just make sure a delayed-ack timer is running. */
2648 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2649 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2650 stcb->sctp_ep, stcb, NULL);
/*
 * Walk the DATA/I-DATA region of a received packet, dispatching each
 * DATA-family chunk to sctp_process_a_data_chunk() and handling any
 * non-DATA chunk found in the data region (protocol violation, error
 * report, or skip per the chunk-type bit rules).  On exit, updates
 * receive-liveness state and calls sctp_sack_check().
 *
 * NOTE(review): this extract is missing a number of original lines
 * (braces, a few statements); comments describe visible code only.
 */
2657 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2658 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2659 struct sctp_nets *net, uint32_t *high_tsn)
2661 struct sctp_chunkhdr *ch, chunk_buf;
2662 struct sctp_association *asoc;
2663 int num_chunks = 0; /* number of control chunks processed */
2665 int break_flag, last_chunk;
2666 int abort_flag = 0, was_a_gap;
2668 uint32_t highest_tsn;
2669 uint16_t chk_length;
2672 sctp_set_rwnd(stcb, &stcb->asoc);
2675 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed BEFORE processing this packet. */
2677 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2678 highest_tsn = asoc->highest_tsn_inside_nr_map;
2680 highest_tsn = asoc->highest_tsn_inside_map;
2682 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2684 * setup where we got the last DATA packet from for any SACK that
2685 * may need to go out. Don't bump the net. This is done ONLY when a
2686 * chunk is assigned.
2688 asoc->last_data_chunk_from = net;
2691 * Now before we proceed we must figure out if this is a wasted
2692 * cluster... i.e. it is a small packet sent in and yet the driver
2693 * underneath allocated a full cluster for it. If so we must copy it
2694 * to a smaller mbuf and free up the cluster mbuf. This will help
2695 * with cluster starvation.
2697 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2698 /* we only handle mbufs that are singletons.. not chains */
2699 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2701 /* ok lets see if we can copy the data up */
2704 /* get the pointers and copy */
2705 to = mtod(m, caddr_t *);
2706 from = mtod((*mm), caddr_t *);
2707 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2708 /* copy the length and free up the old */
2709 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2711 /* success, back copy */
2714 /* We are in trouble in the mbuf world .. yikes */
2718 /* get pointer to the first chunk header */
2719 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2720 sizeof(struct sctp_chunkhdr),
2721 (uint8_t *)&chunk_buf);
2726 * process all DATA chunks...
2728 *high_tsn = asoc->cumulative_tsn;
2730 asoc->data_pkts_seen++;
/* Main chunk-walking loop over the data region of the packet. */
2731 while (stop_proc == 0) {
2732 /* validate chunk length */
2733 chk_length = ntohs(ch->chunk_length);
2734 if (length - *offset < chk_length) {
2735 /* all done, mutulated chunk */
/* DATA is a protocol violation once I-DATA was negotiated... */
2739 if ((asoc->idata_supported == 1) &&
2740 (ch->chunk_type == SCTP_DATA)) {
2741 struct mbuf *op_err;
2742 char msg[SCTP_DIAG_INFO_LEN];
2744 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2745 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2746 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2747 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* ...and vice versa: I-DATA without negotiation aborts too. */
2750 if ((asoc->idata_supported == 0) &&
2751 (ch->chunk_type == SCTP_IDATA)) {
2752 struct mbuf *op_err;
2753 char msg[SCTP_DIAG_INFO_LEN];
2755 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2756 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2757 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2758 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2761 if ((ch->chunk_type == SCTP_DATA) ||
2762 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum header size differs between DATA and I-DATA. */
2765 if (ch->chunk_type == SCTP_DATA) {
2766 clen = sizeof(struct sctp_data_chunk);
2768 clen = sizeof(struct sctp_idata_chunk);
2770 if (chk_length < clen) {
2772 * Need to send an abort since we had a
2773 * invalid data chunk.
2775 struct mbuf *op_err;
2776 char msg[SCTP_DIAG_INFO_LEN];
2778 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2779 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2781 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2782 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2783 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2786 #ifdef SCTP_AUDITING_ENABLED
2787 sctp_audit_log(0xB1, 0);
/* Is this (32-bit padded) chunk the last one in the packet? */
2789 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2794 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2795 chk_length, net, high_tsn, &abort_flag, &break_flag,
2796 last_chunk, ch->chunk_type)) {
2804 * Set because of out of rwnd space and no
2805 * drop rep space left.
2811 /* not a data chunk in the data region */
2812 switch (ch->chunk_type) {
2813 case SCTP_INITIATION:
2814 case SCTP_INITIATION_ACK:
2815 case SCTP_SELECTIVE_ACK:
2816 case SCTP_NR_SELECTIVE_ACK:
2817 case SCTP_HEARTBEAT_REQUEST:
2818 case SCTP_HEARTBEAT_ACK:
2819 case SCTP_ABORT_ASSOCIATION:
2821 case SCTP_SHUTDOWN_ACK:
2822 case SCTP_OPERATION_ERROR:
2823 case SCTP_COOKIE_ECHO:
2824 case SCTP_COOKIE_ACK:
2827 case SCTP_SHUTDOWN_COMPLETE:
2828 case SCTP_AUTHENTICATION:
2829 case SCTP_ASCONF_ACK:
2830 case SCTP_PACKET_DROPPED:
2831 case SCTP_STREAM_RESET:
2832 case SCTP_FORWARD_CUM_TSN:
2836 * Now, what do we do with KNOWN
2837 * chunks that are NOT in the right
2840 * For now, I do nothing but ignore
2841 * them. We may later want to add
2842 * sysctl stuff to switch out and do
2843 * either an ABORT() or possibly
2846 struct mbuf *op_err;
2847 char msg[SCTP_DIAG_INFO_LEN];
2849 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2851 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2852 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2857 * Unknown chunk type: use bit rules after
2860 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2862 * Need to send an abort since we
2863 * had a invalid chunk.
2865 struct mbuf *op_err;
2866 char msg[SCTP_DIAG_INFO_LEN];
2868 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2869 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2870 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2871 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* 0x40 bit of the type: report unrecognized chunk to the peer. */
2874 if (ch->chunk_type & 0x40) {
2875 /* Add a error report to the queue */
2876 struct mbuf *op_err;
2877 struct sctp_gen_error_cause *cause;
2879 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2880 0, M_NOWAIT, 1, MT_DATA);
2881 if (op_err != NULL) {
2882 cause = mtod(op_err, struct sctp_gen_error_cause *);
2883 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2884 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2885 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2886 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2887 if (SCTP_BUF_NEXT(op_err) != NULL) {
2888 sctp_queue_op_err(stcb, op_err);
/* Copy of the offending chunk failed; drop the report. */
2890 sctp_m_freem(op_err);
/* 0x80 bit clear: stop processing the rest of the packet. */
2894 if ((ch->chunk_type & 0x80) == 0) {
2895 /* discard the rest of this packet */
2897 } /* else skip this bad chunk and
2900 } /* switch of chunk type */
/* Advance to the next chunk (lengths are 32-bit padded). */
2902 *offset += SCTP_SIZE32(chk_length);
2903 if ((*offset >= length) || stop_proc) {
2904 /* no more data left in the mbuf chain */
2908 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2909 sizeof(struct sctp_chunkhdr),
2910 (uint8_t *)&chunk_buf);
2919 * we need to report rwnd overrun drops.
2921 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2925 * Did we get data, if so update the time for auto-close and
2926 * give peer credit for being alive.
2928 SCTP_STAT_INCR(sctps_recvpktwithdata);
2929 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2930 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2931 stcb->asoc.overall_error_count,
2933 SCTP_FROM_SCTP_INDATA,
/* Fresh data from the peer resets the association error count. */
2936 stcb->asoc.overall_error_count = 0;
2937 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2939 /* now service all of the reassm queue if needed */
2940 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2941 /* Assure that we ack right away */
2942 stcb->asoc.send_sack = 1;
2944 /* Start a sack timer or QUEUE a SACK for sending */
2945 sctp_sack_check(stcb, was_a_gap);
/*
 * Apply one SACK gap-ack block [last_tsn+frag_strt .. last_tsn+frag_end]
 * to the sent queue: mark covered chunks MARKED (or NR_ACKED when
 * nr_sacking), decrease flight size, update RTO samples, and maintain
 * the CMT pseudo-cumack / SFR / DAC bookkeeping.  *p_tp1 carries the
 * scan position between calls so consecutive in-order blocks resume
 * where the previous one stopped.  Returns wake_him (non-zero when a
 * chunk's data was freed; used only for nr-sack).
 *
 * NOTE(review): this extract is missing some original lines (braces,
 * a few statements); comments describe the visible code only.
 */
2950 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2951 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2953 uint32_t *biggest_newly_acked_tsn,
2954 uint32_t *this_sack_lowest_newack,
2957 struct sctp_tmit_chunk *tp1;
2958 unsigned int theTSN;
2959 int j, wake_him = 0, circled = 0;
2961 /* Recover the tp1 we last saw */
2964 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* One iteration per TSN covered by this gap-ack fragment. */
2966 for (j = frag_strt; j <= frag_end; j++) {
2967 theTSN = j + last_tsn;
2969 if (tp1->rec.data.doing_fast_retransmit)
2973 * CMT: CUCv2 algorithm. For each TSN being
2974 * processed from the sent queue, track the
2975 * next expected pseudo-cumack, or
2976 * rtx_pseudo_cumack, if required. Separate
2977 * cumack trackers for first transmissions,
2978 * and retransmissions.
/* First-transmission pseudo-cumack tracker (snd_count == 1). */
2980 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2981 (tp1->whoTo->find_pseudo_cumack == 1) &&
2982 (tp1->snd_count == 1)) {
2983 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2984 tp1->whoTo->find_pseudo_cumack = 0;
/* Retransmission pseudo-cumack tracker (snd_count > 1). */
2986 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2987 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2988 (tp1->snd_count > 1)) {
2989 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2990 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the sent-queue entry for this gap-acked TSN. */
2992 if (tp1->rec.data.tsn == theTSN) {
2993 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2995 * must be held until
2998 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3000 * If it is less than RESEND, it is
3001 * now no-longer in flight.
3002 * Higher values may already be set
3003 * via previous Gap Ack Blocks...
3004 * i.e. ACKED or RESEND.
3006 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3007 *biggest_newly_acked_tsn)) {
3008 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3011 * CMT: SFR algo (and HTNA) - set
3012 * saw_newack to 1 for dest being
3013 * newly acked. update
3014 * this_sack_highest_newack if
3017 if (tp1->rec.data.chunk_was_revoked == 0)
3018 tp1->whoTo->saw_newack = 1;
3020 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3021 tp1->whoTo->this_sack_highest_newack)) {
3022 tp1->whoTo->this_sack_highest_newack =
3026 * CMT DAC algo: also update
3027 * this_sack_lowest_newack
3029 if (*this_sack_lowest_newack == 0) {
3030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3031 sctp_log_sack(*this_sack_lowest_newack,
3036 SCTP_LOG_TSN_ACKED);
3038 *this_sack_lowest_newack = tp1->rec.data.tsn;
3041 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3042 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3043 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3044 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3045 * Separate pseudo_cumack trackers for first transmissions and
3048 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3049 if (tp1->rec.data.chunk_was_revoked == 0) {
3050 tp1->whoTo->new_pseudo_cumack = 1;
3052 tp1->whoTo->find_pseudo_cumack = 1;
3054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3055 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3057 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3058 if (tp1->rec.data.chunk_was_revoked == 0) {
3059 tp1->whoTo->new_pseudo_cumack = 1;
3061 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3063 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3064 sctp_log_sack(*biggest_newly_acked_tsn,
3069 SCTP_LOG_TSN_ACKED);
3071 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3072 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3073 tp1->whoTo->flight_size,
3075 (uint32_t)(uintptr_t)tp1->whoTo,
/* Chunk leaves flight: shrink per-net and total flight size. */
3078 sctp_flight_size_decrease(tp1);
3079 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3080 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3083 sctp_total_flight_decrease(stcb, tp1);
3085 tp1->whoTo->net_ack += tp1->send_size;
3086 if (tp1->snd_count < 2) {
3088 * True non-retransmitted chunk
3090 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample from a first-transmission ack (Karn's rule). */
3097 sctp_calculate_rto(stcb,
3100 &tp1->sent_rcv_time,
3101 SCTP_RTT_FROM_DATA)) {
3104 if (tp1->whoTo->rto_needed == 0) {
3105 tp1->whoTo->rto_needed = 1;
3111 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3112 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3113 stcb->asoc.this_sack_highest_gap)) {
3114 stcb->asoc.this_sack_highest_gap =
/* A chunk queued for retransmit got acked: drop retran count. */
3117 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3118 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3119 #ifdef SCTP_AUDITING_ENABLED
3120 sctp_audit_log(0xB2,
3121 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3126 * All chunks NOT UNSENT fall through here and are marked
3127 * (leave PR-SCTP ones that are to skip alone though)
3129 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3130 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3131 tp1->sent = SCTP_DATAGRAM_MARKED;
3133 if (tp1->rec.data.chunk_was_revoked) {
3134 /* deflate the cwnd */
3135 tp1->whoTo->cwnd -= tp1->book_size;
3136 tp1->rec.data.chunk_was_revoked = 0;
3138 /* NR Sack code here */
3140 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3141 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3142 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3145 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* Stream drained while a reset is pending: trigger the reset. */
3148 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3149 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3150 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3151 stcb->asoc.trigger_reset = 1;
3153 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* NR-acked data can be freed immediately. */
3159 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3160 sctp_m_freem(tp1->data);
3167 } /* if (tp1->tsn == theTSN) */
/* Queue TSN already past theTSN: this TSN is not on the queue. */
3168 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3171 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* Hit the end once: wrap around a single time and keep looking. */
3172 if ((tp1 == NULL) && (circled == 0)) {
3174 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3176 } /* end while (tp1) */
3179 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3181 /* In case the fragments were not in order we must reset */
3182 } /* end for (j = fragStart */
3184 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over the gap-ack blocks (revocable first, then non-revocable
 * per num_seg/num_nr_seg) of a SACK/NR-SACK, pulling each block out of
 * the mbuf chain and applying it via sctp_process_segment_range().
 * Malformed blocks are skipped; out-of-order blocks restart the sent
 * queue scan.  Returns non-zero if any chunk's data was freed.
 */
3188 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3189 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3190 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3191 int num_seg, int num_nr_seg, int *rto_ok)
3193 struct sctp_gap_ack_block *frag, block;
3194 struct sctp_tmit_chunk *tp1;
3199 uint16_t frag_strt, frag_end, prev_frag_end;
3201 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3205 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3208 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap-ack block out of the chunk's mbuf chain. */
3210 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3211 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3212 *offset += sizeof(block);
3214 return (chunk_freed);
/* Block offsets are relative to the SACK's cumulative TSN. */
3216 frag_strt = ntohs(frag->start);
3217 frag_end = ntohs(frag->end);
3219 if (frag_strt > frag_end) {
3220 /* This gap report is malformed, skip it. */
3223 if (frag_strt <= prev_frag_end) {
3224 /* This gap report is not in order, so restart. */
3225 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3227 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3228 *biggest_tsn_acked = last_tsn + frag_end;
3235 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3236 non_revocable, &num_frs, biggest_newly_acked_tsn,
3237 this_sack_lowest_newack, rto_ok)) {
3240 prev_frag_end = frag_end;
3242 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3244 sctp_log_fr(*biggest_tsn_acked,
3245 *biggest_newly_acked_tsn,
3246 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3248 return (chunk_freed);
/*
 * Scan the sent queue for chunks above the cumulative ack that were
 * gap-acked by an earlier SACK but are NOT covered by this one: those
 * have been "revoked" and must go back to SENT state (re-entering
 * flight, with the cwnd inflated to compensate).  Chunks re-acked by
 * this SACK move from MARKED back to ACKED.
 */
3252 sctp_check_for_revoked(struct sctp_tcb *stcb,
3253 struct sctp_association *asoc, uint32_t cumack,
3254 uint32_t biggest_tsn_acked)
3256 struct sctp_tmit_chunk *tp1;
3258 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
/* Only chunks beyond the cum-ack can be revoked. */
3259 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3261 * ok this guy is either ACK or MARKED. If it is
3262 * ACKED it has been previously acked but not this
3263 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Past the highest TSN this SACK reports: nothing more to check. */
3266 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3269 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3270 /* it has been revoked */
3271 tp1->sent = SCTP_DATAGRAM_SENT;
3272 tp1->rec.data.chunk_was_revoked = 1;
3274 * We must add this stuff back in to assure
3275 * timers and such get started.
3277 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3278 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3279 tp1->whoTo->flight_size,
3281 (uint32_t)(uintptr_t)tp1->whoTo,
/* Revoked chunk re-enters flight accounting. */
3284 sctp_flight_size_increase(tp1);
3285 sctp_total_flight_increase(stcb, tp1);
3287 * We inflate the cwnd to compensate for our
3288 * artificial inflation of the flight_size.
3290 tp1->whoTo->cwnd += tp1->book_size;
3291 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3292 sctp_log_sack(asoc->last_acked_seq,
3297 SCTP_LOG_TSN_REVOKED);
3299 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3300 /* it has been re-acked in this SACK */
3301 tp1->sent = SCTP_DATAGRAM_ACKED;
3304 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3310 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3311 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3313 struct sctp_tmit_chunk *tp1;
3314 int strike_flag = 0;
3316 int tot_retrans = 0;
3317 uint32_t sending_seq;
3318 struct sctp_nets *net;
3319 int num_dests_sacked = 0;
3322 * select the sending_seq, this is either the next thing ready to be
3323 * sent but not transmitted, OR, the next seq we assign.
3325 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3327 sending_seq = asoc->sending_seq;
3329 sending_seq = tp1->rec.data.tsn;
3332 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3333 if ((asoc->sctp_cmt_on_off > 0) &&
3334 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3335 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3336 if (net->saw_newack)
3340 if (stcb->asoc.prsctp_supported) {
3341 (void)SCTP_GETTIME_TIMEVAL(&now);
3343 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3345 if (tp1->no_fr_allowed) {
3346 /* this one had a timeout or something */
3349 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3350 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3351 sctp_log_fr(biggest_tsn_newly_acked,
3354 SCTP_FR_LOG_CHECK_STRIKE);
3356 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3357 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3361 if (stcb->asoc.prsctp_supported) {
3362 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3363 /* Is it expired? */
3364 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3365 /* Yes so drop it */
3366 if (tp1->data != NULL) {
3367 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3368 SCTP_SO_NOT_LOCKED);
3374 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3375 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3376 /* we are beyond the tsn in the sack */
3379 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3380 /* either a RESEND, ACKED, or MARKED */
3382 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3383 /* Continue strikin FWD-TSN chunks */
3384 tp1->rec.data.fwd_tsn_cnt++;
3389 * CMT : SFR algo (covers part of DAC and HTNA as well)
3391 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3393 * No new acks were receieved for data sent to this
3394 * dest. Therefore, according to the SFR algo for
3395 * CMT, no data sent to this dest can be marked for
3396 * FR using this SACK.
3399 } else if (tp1->whoTo &&
3400 SCTP_TSN_GT(tp1->rec.data.tsn,
3401 tp1->whoTo->this_sack_highest_newack) &&
3402 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3404 * CMT: New acks were receieved for data sent to
3405 * this dest. But no new acks were seen for data
3406 * sent after tp1. Therefore, according to the SFR
3407 * algo for CMT, tp1 cannot be marked for FR using
3408 * this SACK. This step covers part of the DAC algo
3409 * and the HTNA algo as well.
3414 * Here we check to see if we were have already done a FR
3415 * and if so we see if the biggest TSN we saw in the sack is
3416 * smaller than the recovery point. If so we don't strike
3417 * the tsn... otherwise we CAN strike the TSN.
3420 * @@@ JRI: Check for CMT if (accum_moved &&
3421 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3424 if (accum_moved && asoc->fast_retran_loss_recovery) {
3426 * Strike the TSN if in fast-recovery and cum-ack
3429 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3430 sctp_log_fr(biggest_tsn_newly_acked,
3433 SCTP_FR_LOG_STRIKE_CHUNK);
3435 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3438 if ((asoc->sctp_cmt_on_off > 0) &&
3439 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3441 * CMT DAC algorithm: If SACK flag is set to
3442 * 0, then lowest_newack test will not pass
3443 * because it would have been set to the
3444 * cumack earlier. If not already to be
3445 * rtx'd, If not a mixed sack and if tp1 is
3446 * not between two sacked TSNs, then mark by
3447 * one more. NOTE that we are marking by one
3448 * additional time since the SACK DAC flag
3449 * indicates that two packets have been
3450 * received after this missing TSN.
3452 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3453 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3455 sctp_log_fr(16 + num_dests_sacked,
3458 SCTP_FR_LOG_STRIKE_CHUNK);
3463 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3464 (asoc->sctp_cmt_on_off == 0)) {
3466 * For those that have done a FR we must take
3467 * special consideration if we strike. I.e the
3468 * biggest_newly_acked must be higher than the
3469 * sending_seq at the time we did the FR.
3472 #ifdef SCTP_FR_TO_ALTERNATE
3474 * If FR's go to new networks, then we must only do
3475 * this for singly homed asoc's. However if the FR's
3476 * go to the same network (Armando's work) then its
3477 * ok to FR multiple times.
3484 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3485 tp1->rec.data.fast_retran_tsn)) {
3487 * Strike the TSN, since this ack is
3488 * beyond where things were when we
3491 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3492 sctp_log_fr(biggest_tsn_newly_acked,
3495 SCTP_FR_LOG_STRIKE_CHUNK);
3497 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3501 if ((asoc->sctp_cmt_on_off > 0) &&
3502 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3504 * CMT DAC algorithm: If
3505 * SACK flag is set to 0,
3506 * then lowest_newack test
3507 * will not pass because it
3508 * would have been set to
3509 * the cumack earlier. If
3510 * not already to be rtx'd,
3511 * If not a mixed sack and
3512 * if tp1 is not between two
3513 * sacked TSNs, then mark by
3514 * one more. NOTE that we
3515 * are marking by one
3516 * additional time since the
3517 * SACK DAC flag indicates
3518 * that two packets have
3519 * been received after this
3522 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3523 (num_dests_sacked == 1) &&
3524 SCTP_TSN_GT(this_sack_lowest_newack,
3525 tp1->rec.data.tsn)) {
3526 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3527 sctp_log_fr(32 + num_dests_sacked,
3530 SCTP_FR_LOG_STRIKE_CHUNK);
3532 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3540 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3543 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3544 biggest_tsn_newly_acked)) {
3546 * We don't strike these: This is the HTNA
3547 * algorithm i.e. we don't strike If our TSN is
3548 * larger than the Highest TSN Newly Acked.
3552 /* Strike the TSN */
3553 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3554 sctp_log_fr(biggest_tsn_newly_acked,
3557 SCTP_FR_LOG_STRIKE_CHUNK);
3559 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3562 if ((asoc->sctp_cmt_on_off > 0) &&
3563 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3565 * CMT DAC algorithm: If SACK flag is set to
3566 * 0, then lowest_newack test will not pass
3567 * because it would have been set to the
3568 * cumack earlier. If not already to be
3569 * rtx'd, If not a mixed sack and if tp1 is
3570 * not between two sacked TSNs, then mark by
3571 * one more. NOTE that we are marking by one
3572 * additional time since the SACK DAC flag
3573 * indicates that two packets have been
3574 * received after this missing TSN.
3576 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3577 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3579 sctp_log_fr(48 + num_dests_sacked,
3582 SCTP_FR_LOG_STRIKE_CHUNK);
3588 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3589 struct sctp_nets *alt;
3591 /* fix counts and things */
3592 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3593 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3594 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3596 (uint32_t)(uintptr_t)tp1->whoTo,
3600 tp1->whoTo->net_ack++;
3601 sctp_flight_size_decrease(tp1);
3602 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3603 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3608 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3609 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3610 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3612 /* add back to the rwnd */
3613 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3615 /* remove from the total flight */
3616 sctp_total_flight_decrease(stcb, tp1);
3618 if ((stcb->asoc.prsctp_supported) &&
3619 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3621 * Has it been retransmitted tv_sec times? -
3622 * we store the retran count there.
3624 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3625 /* Yes, so drop it */
3626 if (tp1->data != NULL) {
3627 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3628 SCTP_SO_NOT_LOCKED);
3630 /* Make sure to flag we had a FR */
3631 if (tp1->whoTo != NULL) {
3632 tp1->whoTo->net_ack++;
3638 * SCTP_PRINTF("OK, we are now ready to FR this
3641 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3642 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3646 /* This is a subsequent FR */
3647 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3649 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3650 if (asoc->sctp_cmt_on_off > 0) {
3652 * CMT: Using RTX_SSTHRESH policy for CMT.
3653 * If CMT is being used, then pick dest with
3654 * largest ssthresh for any retransmission.
3656 tp1->no_fr_allowed = 1;
3658 /* sa_ignore NO_NULL_CHK */
3659 if (asoc->sctp_cmt_pf > 0) {
3661 * JRS 5/18/07 - If CMT PF is on,
3662 * use the PF version of
3665 alt = sctp_find_alternate_net(stcb, alt, 2);
3668 * JRS 5/18/07 - If only CMT is on,
3669 * use the CMT version of
3672 /* sa_ignore NO_NULL_CHK */
3673 alt = sctp_find_alternate_net(stcb, alt, 1);
3679 * CUCv2: If a different dest is picked for
3680 * the retransmission, then new
3681 * (rtx-)pseudo_cumack needs to be tracked
3682 * for orig dest. Let CUCv2 track new (rtx-)
3683 * pseudo-cumack always.
3686 tp1->whoTo->find_pseudo_cumack = 1;
3687 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3689 } else { /* CMT is OFF */
3690 #ifdef SCTP_FR_TO_ALTERNATE
3691 /* Can we find an alternate? */
3692 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3695 * default behavior is to NOT retransmit
3696 * FR's to an alternate. Armando Caro's
3697 * paper details why.
3703 tp1->rec.data.doing_fast_retransmit = 1;
3705 /* mark the sending seq for possible subsequent FR's */
3707 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3708 * (uint32_t)tpi->rec.data.tsn);
3710 if (TAILQ_EMPTY(&asoc->send_queue)) {
3712 * If the queue of send is empty then its
3713 * the next sequence number that will be
3714 * assigned so we subtract one from this to
3715 * get the one we last sent.
3717 tp1->rec.data.fast_retran_tsn = sending_seq;
3720 * If there are chunks on the send queue
3721 * (unsent data that has made it from the
3722 * stream queues but not out the door, we
3723 * take the first one (which will have the
3724 * lowest TSN) and subtract one to get the
3727 struct sctp_tmit_chunk *ttt;
3729 ttt = TAILQ_FIRST(&asoc->send_queue);
3730 tp1->rec.data.fast_retran_tsn =
3736 * this guy had a RTO calculation pending on
3739 if ((tp1->whoTo != NULL) &&
3740 (tp1->whoTo->rto_needed == 0)) {
3741 tp1->whoTo->rto_needed = 1;
3745 if (alt != tp1->whoTo) {
3746 /* yes, there is an alternate. */
3747 sctp_free_remote_addr(tp1->whoTo);
3748 /* sa_ignore FREED_MEMORY */
3750 atomic_add_int(&alt->ref_count, 1);
/*
 * Try to advance the PR-SCTP "advanced peer ack point": walk the
 * sent_queue past chunks marked SCTP_FORWARD_TSN_SKIP / NR_ACKED,
 * releasing TTL-expired RESEND chunks along the way, so the caller can
 * decide whether a FWD-TSN chunk must be sent.
 *
 * NOTE(review): this extraction is missing source lines (the embedded
 * line numbering has gaps), including this function's return path.
 * a_adv presumably accumulates the last chunk advanced over and is the
 * returned value -- confirm against the complete file.  Code below is
 * left byte-identical.
 */
3756 struct sctp_tmit_chunk *
3757 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3758 struct sctp_association *asoc)
3760 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* PR-SCTP not negotiated: nothing can be advanced (body line missing). */
3764 if (asoc->prsctp_supported == 0) {
3767 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3768 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3769 tp1->sent != SCTP_DATAGRAM_RESEND &&
3770 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3771 /* no chance to advance, out of here */
3774 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3775 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3776 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3777 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3778 asoc->advanced_peer_ack_point,
3779 tp1->rec.data.tsn, 0, 0);
3782 if (!PR_SCTP_ENABLED(tp1->flags)) {
3784 * We can't fwd-tsn past any that are reliable aka
3785 * retransmitted until the asoc fails.
3790 (void)SCTP_GETTIME_TIMEVAL(&now);
3794 * now we got a chunk which is marked for another
3795 * retransmission to a PR-stream but has run out its chances
3796 * already maybe OR has been marked to skip now. Can we skip
3797 * it if its a resend?
3799 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3800 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3802 * Now is this one marked for resend and its time is
3805 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3806 /* Yes so drop it */
3808 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3809 1, SCTP_SO_NOT_LOCKED);
3813 * No, we are done when hit one for resend
3814 * whos time as not expired.
3820 * Ok now if this chunk is marked to drop it we can clean up
3821 * the chunk, advance our peer ack point and we can check
3824 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3825 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3826 /* advance PeerAckPoint goes forward */
3827 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3828 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3830 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3831 /* No update but we do save the chk */
3836 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping: classify every chunk
 * on the sent_queue by its `sent` state and compare the tallies against
 * asoc->total_flight / total_flight_count captured at entry.
 *
 * NOTE(review): the extraction is missing lines here (embedded numbering
 * gaps) -- the counter increments, the return-type line, and the return
 * value (presumably nonzero on mismatch, given how the caller uses it)
 * are not visible; confirm against the complete file.  Code below is
 * left byte-identical.
 */
3846 sctp_fs_audit(struct sctp_association *asoc)
3848 struct sctp_tmit_chunk *chk;
3849 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3852 int entry_flight, entry_cnt;
3857 entry_flight = asoc->total_flight;
3858 entry_cnt = asoc->total_flight_count;
/* presumably bail out when every queued chunk is PR-SCTP -- body line missing. */
3860 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3863 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3864 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3865 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3870 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3872 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3874 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3881 if ((inflight > 0) || (inbetween > 0)) {
3883 panic("Flight size-express incorrect? \n");
3885 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3886 entry_flight, entry_cnt);
3888 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3889 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a window probe: clear its
 * window_probe flag and, unless it has already been acked (or its data
 * mbuf is gone), remove it from flight accounting and mark it
 * SCTP_DATAGRAM_RESEND so it is retransmitted as ordinary data once the
 * peer's window reopened.
 *
 * NOTE(review): lines are missing from this extraction (embedded
 * numbering gaps), e.g. the early return after the "skipped" log and
 * some argument lines of the log calls; code below is left
 * byte-identical.
 */
3897 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3898 struct sctp_association *asoc,
3899 struct sctp_tmit_chunk *tp1)
3901 tp1->window_probe = 0;
3902 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3903 /* TSN's skipped we do NOT move back. */
3904 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3905 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3907 (uint32_t)(uintptr_t)tp1->whoTo,
3911 /* First setup this by shrinking flight */
3912 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3913 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3916 sctp_flight_size_decrease(tp1);
3917 sctp_total_flight_decrease(stcb, tp1);
3918 /* Now mark for resend */
3919 tp1->sent = SCTP_DATAGRAM_RESEND;
3920 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3922 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3923 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3924 tp1->whoTo->flight_size,
3926 (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * Express (fast-path) SACK handling: used when the SACK carries only a
 * new cumulative ack and no gap-ack blocks.  Frees every chunk newly
 * covered by the cum-ack, updates per-net RTO/cwnd and peer-rwnd
 * accounting, recovers window-probe chunks, progresses the shutdown
 * state machine when the queues drain, and advances the PR-SCTP
 * peer-ack point (possibly emitting a FWD-TSN).
 *
 * NOTE(review): this extraction is missing many source lines (the
 * embedded line numbering has gaps) -- closing braces, `return`s, and
 * several statement/argument lines are absent.  Code below is left
 * byte-identical; comments added only between visible lines.
 */
3932 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3933 uint32_t rwnd, int *abort_now, int ecne_seen)
3935 struct sctp_nets *net;
3936 struct sctp_association *asoc;
3937 struct sctp_tmit_chunk *tp1, *tp2;
3939 int win_probe_recovery = 0;
3940 int win_probe_recovered = 0;
3941 int j, done_once = 0;
3945 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3946 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3947 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3949 SCTP_TCB_LOCK_ASSERT(stcb);
3950 #ifdef SCTP_ASOCLOG_OF_TSNS
3951 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3952 stcb->asoc.cumack_log_at++;
3953 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3954 stcb->asoc.cumack_log_at = 0;
3958 old_rwnd = asoc->peers_rwnd;
/* Old (behind last_acked_seq) sacks are discarded; equal cum-ack is a pure window update. */
3959 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3962 } else if (asoc->last_acked_seq == cumack) {
3963 /* Window update sack */
3964 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3965 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3966 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3967 /* SWS sender side engages */
3968 asoc->peers_rwnd = 0;
3970 if (asoc->peers_rwnd > old_rwnd) {
3976 /* First setup for CC stuff */
3977 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3978 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3979 /* Drag along the window_tsn for cwr's */
3980 net->cwr_window_tsn = cumack;
3982 net->prev_cwnd = net->cwnd;
3987 * CMT: Reset CUC and Fast recovery algo variables before
3990 net->new_pseudo_cumack = 0;
3991 net->will_exit_fast_recovery = 0;
3992 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3993 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity: a cum-ack at or beyond our next sending TSN is a protocol violation -> abort. */
3996 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3997 tp1 = TAILQ_LAST(&asoc->sent_queue,
3998 sctpchunk_listhead);
3999 send_s = tp1->rec.data.tsn + 1;
4001 send_s = asoc->sending_seq;
4003 if (SCTP_TSN_GE(cumack, send_s)) {
4004 struct mbuf *op_err;
4005 char msg[SCTP_DIAG_INFO_LEN];
4009 SCTP_SNPRINTF(msg, sizeof(msg),
4010 "Cum ack %8.8x greater or equal than TSN %8.8x",
4012 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4013 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4014 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4017 asoc->this_sack_highest_gap = cumack;
4018 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4019 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4020 stcb->asoc.overall_error_count,
4022 SCTP_FROM_SCTP_INDATA,
4025 stcb->asoc.overall_error_count = 0;
4026 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4027 /* process the new consecutive TSN first */
4028 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4029 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4030 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4031 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4033 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4035 * If it is less than ACKED, it is
4036 * now no-longer in flight. Higher
4037 * values may occur during marking
4039 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4040 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4041 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4042 tp1->whoTo->flight_size,
4044 (uint32_t)(uintptr_t)tp1->whoTo,
4047 sctp_flight_size_decrease(tp1);
4048 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4049 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4052 /* sa_ignore NO_NULL_CHK */
4053 sctp_total_flight_decrease(stcb, tp1);
4055 tp1->whoTo->net_ack += tp1->send_size;
4056 if (tp1->snd_count < 2) {
4058 * True non-retransmitted
4061 tp1->whoTo->net_ack2 +=
4064 /* update RTO too? */
4067 sctp_calculate_rto(stcb,
4070 &tp1->sent_rcv_time,
4071 SCTP_RTT_FROM_DATA)) {
4074 if (tp1->whoTo->rto_needed == 0) {
4075 tp1->whoTo->rto_needed = 1;
4081 * CMT: CUCv2 algorithm. From the
4082 * cumack'd TSNs, for each TSN being
4083 * acked for the first time, set the
4084 * following variables for the
4085 * corresp destination.
4086 * new_pseudo_cumack will trigger a
4088 * find_(rtx_)pseudo_cumack will
4089 * trigger search for the next
4090 * expected (rtx-)pseudo-cumack.
4092 tp1->whoTo->new_pseudo_cumack = 1;
4093 tp1->whoTo->find_pseudo_cumack = 1;
4094 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4095 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4096 /* sa_ignore NO_NULL_CHK */
4097 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4100 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4101 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4103 if (tp1->rec.data.chunk_was_revoked) {
4104 /* deflate the cwnd */
4105 tp1->whoTo->cwnd -= tp1->book_size;
4106 tp1->rec.data.chunk_was_revoked = 0;
4108 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4109 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4110 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4113 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4117 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4118 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4119 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4120 asoc->trigger_reset = 1;
4122 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4124 /* sa_ignore NO_NULL_CHK */
4125 sctp_free_bufspace(stcb, asoc, tp1, 1);
4126 sctp_m_freem(tp1->data);
4129 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4130 sctp_log_sack(asoc->last_acked_seq,
4135 SCTP_LOG_FREE_SENT);
4137 asoc->sent_queue_cnt--;
4138 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Wake the sending socket now that send-buffer space was released. */
4144 /* sa_ignore NO_NULL_CHK */
4145 if (stcb->sctp_socket) {
4146 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4147 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4148 /* sa_ignore NO_NULL_CHK */
4149 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4151 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4153 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4154 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4158 /* JRS - Use the congestion control given in the CC module */
4159 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4160 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4161 if (net->net_ack2 > 0) {
4163 * Karn's rule applies to clearing error
4164 * count, this is optional.
4166 net->error_count = 0;
4167 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4168 /* addr came good */
4169 net->dest_state |= SCTP_ADDR_REACHABLE;
4170 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4171 0, (void *)net, SCTP_SO_NOT_LOCKED);
4173 if (net == stcb->asoc.primary_destination) {
4174 if (stcb->asoc.alternate) {
4176 * release the alternate,
4179 sctp_free_remote_addr(stcb->asoc.alternate);
4180 stcb->asoc.alternate = NULL;
4183 if (net->dest_state & SCTP_ADDR_PF) {
4184 net->dest_state &= ~SCTP_ADDR_PF;
4185 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4186 stcb->sctp_ep, stcb, net,
4187 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4188 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4189 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4190 /* Done with this net */
4193 /* restore any doubled timers */
4194 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4195 if (net->RTO < stcb->asoc.minrto) {
4196 net->RTO = stcb->asoc.minrto;
4198 if (net->RTO > stcb->asoc.maxrto) {
4199 net->RTO = stcb->asoc.maxrto;
4203 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4205 asoc->last_acked_seq = cumack;
4207 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4208 /* nothing left in-flight */
4209 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4210 net->flight_size = 0;
4211 net->partial_bytes_acked = 0;
4213 asoc->total_flight = 0;
4214 asoc->total_flight_count = 0;
/* Recompute peer rwnd against remaining flight; enforce SWS avoidance. */
4218 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4219 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4220 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4221 /* SWS sender side engages */
4222 asoc->peers_rwnd = 0;
4224 if (asoc->peers_rwnd > old_rwnd) {
4225 win_probe_recovery = 1;
4227 /* Now assure a timer where data is queued at */
/* Per-net: recover window-probe chunks and (re)start/stop the T3-rtx SEND timer. */
4230 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4231 if (win_probe_recovery && (net->window_probe)) {
4232 win_probe_recovered = 1;
4234 * Find first chunk that was used with window probe
4235 * and clear the sent
4237 /* sa_ignore FREED_MEMORY */
4238 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4239 if (tp1->window_probe) {
4240 /* move back to data send queue */
4241 sctp_window_probe_recovery(stcb, asoc, tp1);
4246 if (net->flight_size) {
4248 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4249 if (net->window_probe) {
4250 net->window_probe = 0;
4253 if (net->window_probe) {
4255 * In window probes we must assure a timer
4256 * is still running there
4258 net->window_probe = 0;
4259 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4260 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4262 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4263 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4265 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4270 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4271 (asoc->sent_queue_retran_cnt == 0) &&
4272 (win_probe_recovered == 0) &&
4275 * huh, this should not happen unless all packets are
4276 * PR-SCTP and marked to skip of course.
4278 if (sctp_fs_audit(asoc)) {
4279 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4280 net->flight_size = 0;
4282 asoc->total_flight = 0;
4283 asoc->total_flight_count = 0;
4284 asoc->sent_queue_retran_cnt = 0;
4285 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4286 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4287 sctp_flight_size_increase(tp1);
4288 sctp_total_flight_increase(stcb, tp1);
4289 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4290 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4297 /**********************************/
4298 /* Now what about shutdown issues */
4299 /**********************************/
4300 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4301 /* nothing left on sendqueue.. consider done */
4303 if ((asoc->stream_queue_cnt == 1) &&
4304 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4305 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4306 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4307 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4309 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4310 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4311 (asoc->stream_queue_cnt == 1) &&
4312 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4313 struct mbuf *op_err;
4317 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4318 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4319 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4322 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4323 (asoc->stream_queue_cnt == 0)) {
4324 struct sctp_nets *netp;
4326 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4327 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4328 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4330 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4331 sctp_stop_timers_for_shutdown(stcb);
4332 if (asoc->alternate) {
4333 netp = asoc->alternate;
4335 netp = asoc->primary_destination;
4337 sctp_send_shutdown(stcb, netp);
4338 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4339 stcb->sctp_ep, stcb, netp);
4340 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4341 stcb->sctp_ep, stcb, NULL);
4342 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4343 (asoc->stream_queue_cnt == 0)) {
4344 struct sctp_nets *netp;
4346 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4347 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4348 sctp_stop_timers_for_shutdown(stcb);
4349 if (asoc->alternate) {
4350 netp = asoc->alternate;
4352 netp = asoc->primary_destination;
4354 sctp_send_shutdown_ack(stcb, netp);
4355 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4356 stcb->sctp_ep, stcb, netp);
4359 /*********************************************/
4360 /* Here we perform PR-SCTP procedures */
4362 /*********************************************/
4363 /* C1. update advancedPeerAckPoint */
4364 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4365 asoc->advanced_peer_ack_point = cumack;
4367 /* PR-Sctp issues need to be addressed too */
4368 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4369 struct sctp_tmit_chunk *lchk;
4370 uint32_t old_adv_peer_ack_point;
4372 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4373 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4374 /* C3. See if we need to send a Fwd-TSN */
4375 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4377 * ISSUE with ECN, see FWD-TSN processing.
4379 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4380 send_forward_tsn(stcb, asoc);
4382 /* try to FR fwd-tsn's that get lost too */
4383 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4384 send_forward_tsn(stcb, asoc);
4388 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4389 if (lchk->whoTo != NULL) {
4394 /* Assure a timer is up */
4395 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4396 stcb->sctp_ep, stcb, lchk->whoTo);
4399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4400 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4402 stcb->asoc.peers_rwnd,
4403 stcb->asoc.total_flight,
4404 stcb->asoc.total_output_queue_size);
4409 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4410 struct sctp_tcb *stcb,
4411 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4412 int *abort_now, uint8_t flags,
4413 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4415 struct sctp_association *asoc;
4416 struct sctp_tmit_chunk *tp1, *tp2;
4417 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4418 uint16_t wake_him = 0;
4419 uint32_t send_s = 0;
4421 int accum_moved = 0;
4422 int will_exit_fast_recovery = 0;
4423 uint32_t a_rwnd, old_rwnd;
4424 int win_probe_recovery = 0;
4425 int win_probe_recovered = 0;
4426 struct sctp_nets *net = NULL;
4429 uint8_t reneged_all = 0;
4430 uint8_t cmt_dac_flag;
4433 * we take any chance we can to service our queues since we cannot
4434 * get awoken when the socket is read from :<
4437 * Now perform the actual SACK handling: 1) Verify that it is not an
4438 * old sack, if so discard. 2) If there is nothing left in the send
4439 * queue (cum-ack is equal to last acked) then you have a duplicate
4440 * too, update any rwnd change and verify no timers are running.
4441 * then return. 3) Process any new consequtive data i.e. cum-ack
4442 * moved process these first and note that it moved. 4) Process any
4443 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4444 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4445 * sync up flightsizes and things, stop all timers and also check
4446 * for shutdown_pending state. If so then go ahead and send off the
4447 * shutdown. If in shutdown recv, send off the shutdown-ack and
4448 * start that timer, Ret. 9) Strike any non-acked things and do FR
4449 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4450 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4451 * if in shutdown_recv state.
4453 SCTP_TCB_LOCK_ASSERT(stcb);
4455 this_sack_lowest_newack = 0;
4456 SCTP_STAT_INCR(sctps_slowpath_sack);
4458 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4459 #ifdef SCTP_ASOCLOG_OF_TSNS
4460 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4461 stcb->asoc.cumack_log_at++;
4462 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4463 stcb->asoc.cumack_log_at = 0;
4468 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4469 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4470 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4473 old_rwnd = stcb->asoc.peers_rwnd;
4474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4475 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4476 stcb->asoc.overall_error_count,
4478 SCTP_FROM_SCTP_INDATA,
4481 stcb->asoc.overall_error_count = 0;
4483 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4484 sctp_log_sack(asoc->last_acked_seq,
4491 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4493 uint32_t *dupdata, dblock;
4495 for (i = 0; i < num_dup; i++) {
4496 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4497 sizeof(uint32_t), (uint8_t *)&dblock);
4498 if (dupdata == NULL) {
4501 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4505 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4506 tp1 = TAILQ_LAST(&asoc->sent_queue,
4507 sctpchunk_listhead);
4508 send_s = tp1->rec.data.tsn + 1;
4511 send_s = asoc->sending_seq;
4513 if (SCTP_TSN_GE(cum_ack, send_s)) {
4514 struct mbuf *op_err;
4515 char msg[SCTP_DIAG_INFO_LEN];
4518 * no way, we have not even sent this TSN out yet. Peer is
4519 * hopelessly messed up with us.
4521 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4524 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4525 tp1->rec.data.tsn, (void *)tp1);
4530 SCTP_SNPRINTF(msg, sizeof(msg),
4531 "Cum ack %8.8x greater or equal than TSN %8.8x",
4533 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4534 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4535 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4538 /**********************/
4539 /* 1) check the range */
4540 /**********************/
4541 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4542 /* acking something behind */
4546 /* update the Rwnd of the peer */
4547 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4548 TAILQ_EMPTY(&asoc->send_queue) &&
4549 (asoc->stream_queue_cnt == 0)) {
4550 /* nothing left on send/sent and strmq */
4551 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4552 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4553 asoc->peers_rwnd, 0, 0, a_rwnd);
4555 asoc->peers_rwnd = a_rwnd;
4556 if (asoc->sent_queue_retran_cnt) {
4557 asoc->sent_queue_retran_cnt = 0;
4559 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4560 /* SWS sender side engages */
4561 asoc->peers_rwnd = 0;
4563 /* stop any timers */
4564 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4565 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4566 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4567 net->partial_bytes_acked = 0;
4568 net->flight_size = 0;
4570 asoc->total_flight = 0;
4571 asoc->total_flight_count = 0;
4575 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4576 * things. The total byte count acked is tracked in netAckSz AND
4577 * netAck2 is used to track the total bytes acked that are un-
4578 * amibguious and were never retransmitted. We track these on a per
4579 * destination address basis.
4581 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4582 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4583 /* Drag along the window_tsn for cwr's */
4584 net->cwr_window_tsn = cum_ack;
4586 net->prev_cwnd = net->cwnd;
4591 * CMT: Reset CUC and Fast recovery algo variables before
4594 net->new_pseudo_cumack = 0;
4595 net->will_exit_fast_recovery = 0;
4596 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4597 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4601 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4602 * to be greater than the cumack. Also reset saw_newack to 0
4605 net->saw_newack = 0;
4606 net->this_sack_highest_newack = last_tsn;
4608 /* process the new consecutive TSN first */
4609 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4610 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4611 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4613 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4615 * If it is less than ACKED, it is
4616 * now no-longer in flight. Higher
4617 * values may occur during marking
4619 if ((tp1->whoTo->dest_state &
4620 SCTP_ADDR_UNCONFIRMED) &&
4621 (tp1->snd_count < 2)) {
4623 * If there was no retran
4624 * and the address is
4625 * un-confirmed and we sent
4627 * sacked.. its confirmed,
4630 tp1->whoTo->dest_state &=
4631 ~SCTP_ADDR_UNCONFIRMED;
4633 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4634 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4635 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4636 tp1->whoTo->flight_size,
4638 (uint32_t)(uintptr_t)tp1->whoTo,
4641 sctp_flight_size_decrease(tp1);
4642 sctp_total_flight_decrease(stcb, tp1);
4643 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4644 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4648 tp1->whoTo->net_ack += tp1->send_size;
4650 /* CMT SFR and DAC algos */
4651 this_sack_lowest_newack = tp1->rec.data.tsn;
4652 tp1->whoTo->saw_newack = 1;
4654 if (tp1->snd_count < 2) {
4656 * True non-retransmitted
4659 tp1->whoTo->net_ack2 +=
4662 /* update RTO too? */
4665 sctp_calculate_rto(stcb,
4668 &tp1->sent_rcv_time,
4669 SCTP_RTT_FROM_DATA)) {
4672 if (tp1->whoTo->rto_needed == 0) {
4673 tp1->whoTo->rto_needed = 1;
4679 * CMT: CUCv2 algorithm. From the
4680 * cumack'd TSNs, for each TSN being
4681 * acked for the first time, set the
4682 * following variables for the
4683 * corresp destination.
4684 * new_pseudo_cumack will trigger a
4686 * find_(rtx_)pseudo_cumack will
4687 * trigger search for the next
4688 * expected (rtx-)pseudo-cumack.
4690 tp1->whoTo->new_pseudo_cumack = 1;
4691 tp1->whoTo->find_pseudo_cumack = 1;
4692 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4693 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4694 sctp_log_sack(asoc->last_acked_seq,
4699 SCTP_LOG_TSN_ACKED);
4701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4702 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4705 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4706 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4707 #ifdef SCTP_AUDITING_ENABLED
4708 sctp_audit_log(0xB3,
4709 (asoc->sent_queue_retran_cnt & 0x000000ff));
4712 if (tp1->rec.data.chunk_was_revoked) {
4713 /* deflate the cwnd */
4714 tp1->whoTo->cwnd -= tp1->book_size;
4715 tp1->rec.data.chunk_was_revoked = 0;
4717 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4718 tp1->sent = SCTP_DATAGRAM_ACKED;
4725 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4726 /* always set this up to cum-ack */
4727 asoc->this_sack_highest_gap = last_tsn;
4729 if ((num_seg > 0) || (num_nr_seg > 0)) {
4731 * thisSackHighestGap will increase while handling NEW
4732 * segments this_sack_highest_newack will increase while
4733 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4734 * used for CMT DAC algo. saw_newack will also change.
4736 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4737 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4738 num_seg, num_nr_seg, &rto_ok)) {
4742 * validate the biggest_tsn_acked in the gap acks if strict
4743 * adherence is wanted.
4745 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4747 * peer is either confused or we are under attack.
4750 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4751 biggest_tsn_acked, send_s);
4755 /*******************************************/
4756 /* cancel ALL T3-send timer if accum moved */
4757 /*******************************************/
4758 if (asoc->sctp_cmt_on_off > 0) {
4759 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4760 if (net->new_pseudo_cumack)
4761 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4763 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4767 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4768 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4769 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4773 /********************************************/
4774 /* drop the acked chunks from the sentqueue */
4775 /********************************************/
4776 asoc->last_acked_seq = cum_ack;
4778 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4779 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4782 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4783 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4784 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4787 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4791 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4792 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4793 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4794 asoc->trigger_reset = 1;
4796 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4797 if (PR_SCTP_ENABLED(tp1->flags)) {
4798 if (asoc->pr_sctp_cnt != 0)
4799 asoc->pr_sctp_cnt--;
4801 asoc->sent_queue_cnt--;
4803 /* sa_ignore NO_NULL_CHK */
4804 sctp_free_bufspace(stcb, asoc, tp1, 1);
4805 sctp_m_freem(tp1->data);
4807 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4808 asoc->sent_queue_cnt_removeable--;
4811 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4812 sctp_log_sack(asoc->last_acked_seq,
4817 SCTP_LOG_FREE_SENT);
4819 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4822 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4824 panic("Warning flight size is positive and should be 0");
4826 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4827 asoc->total_flight);
4829 asoc->total_flight = 0;
4832 /* sa_ignore NO_NULL_CHK */
4833 if ((wake_him) && (stcb->sctp_socket)) {
4834 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4836 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4838 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4840 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4841 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4845 if (asoc->fast_retran_loss_recovery && accum_moved) {
4846 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4847 /* Setup so we will exit RFC2582 fast recovery */
4848 will_exit_fast_recovery = 1;
4852 * Check for revoked fragments:
4854 * if Previous sack - Had no frags then we can't have any revoked if
4855 * Previous sack - Had frag's then - If we now have frags aka
4856 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4857 * some of them. else - The peer revoked all ACKED fragments, since
4858 * we had some before and now we have NONE.
4862 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4863 asoc->saw_sack_with_frags = 1;
4864 } else if (asoc->saw_sack_with_frags) {
4865 int cnt_revoked = 0;
4867 /* Peer revoked all dg's marked or acked */
4868 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4869 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4870 tp1->sent = SCTP_DATAGRAM_SENT;
4871 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4872 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4873 tp1->whoTo->flight_size,
4875 (uint32_t)(uintptr_t)tp1->whoTo,
4878 sctp_flight_size_increase(tp1);
4879 sctp_total_flight_increase(stcb, tp1);
4880 tp1->rec.data.chunk_was_revoked = 1;
4882 * To ensure that this increase in
4883 * flightsize, which is artificial, does not
4884 * throttle the sender, we also increase the
4885 * cwnd artificially.
4887 tp1->whoTo->cwnd += tp1->book_size;
4894 asoc->saw_sack_with_frags = 0;
4897 asoc->saw_sack_with_nr_frags = 1;
4899 asoc->saw_sack_with_nr_frags = 0;
4901 /* JRS - Use the congestion control given in the CC module */
4902 if (ecne_seen == 0) {
4903 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4904 if (net->net_ack2 > 0) {
4906 * Karn's rule applies to clearing error
4907 * count, this is optional.
4909 net->error_count = 0;
4910 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4911 /* addr came good */
4912 net->dest_state |= SCTP_ADDR_REACHABLE;
4913 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4914 0, (void *)net, SCTP_SO_NOT_LOCKED);
4917 if (net == stcb->asoc.primary_destination) {
4918 if (stcb->asoc.alternate) {
4920 * release the alternate,
4923 sctp_free_remote_addr(stcb->asoc.alternate);
4924 stcb->asoc.alternate = NULL;
4928 if (net->dest_state & SCTP_ADDR_PF) {
4929 net->dest_state &= ~SCTP_ADDR_PF;
4930 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4931 stcb->sctp_ep, stcb, net,
4932 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4933 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4934 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4935 /* Done with this net */
4938 /* restore any doubled timers */
4939 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4940 if (net->RTO < stcb->asoc.minrto) {
4941 net->RTO = stcb->asoc.minrto;
4943 if (net->RTO > stcb->asoc.maxrto) {
4944 net->RTO = stcb->asoc.maxrto;
4948 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4951 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4952 /* nothing left in-flight */
4953 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4954 /* stop all timers */
4955 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4957 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4958 net->flight_size = 0;
4959 net->partial_bytes_acked = 0;
4961 asoc->total_flight = 0;
4962 asoc->total_flight_count = 0;
4965 /**********************************/
4966 /* Now what about shutdown issues */
4967 /**********************************/
4968 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4969 /* nothing left on sendqueue.. consider done */
4970 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4971 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4972 asoc->peers_rwnd, 0, 0, a_rwnd);
4974 asoc->peers_rwnd = a_rwnd;
4975 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4976 /* SWS sender side engages */
4977 asoc->peers_rwnd = 0;
4980 if ((asoc->stream_queue_cnt == 1) &&
4981 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4982 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4983 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4984 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4986 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4987 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4988 (asoc->stream_queue_cnt == 1) &&
4989 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4990 struct mbuf *op_err;
4994 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4995 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
4996 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4999 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5000 (asoc->stream_queue_cnt == 0)) {
5001 struct sctp_nets *netp;
5003 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5004 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5005 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5007 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5008 sctp_stop_timers_for_shutdown(stcb);
5009 if (asoc->alternate) {
5010 netp = asoc->alternate;
5012 netp = asoc->primary_destination;
5014 sctp_send_shutdown(stcb, netp);
5015 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5016 stcb->sctp_ep, stcb, netp);
5017 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5018 stcb->sctp_ep, stcb, NULL);
5020 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5021 (asoc->stream_queue_cnt == 0)) {
5022 struct sctp_nets *netp;
5024 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5025 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5026 sctp_stop_timers_for_shutdown(stcb);
5027 if (asoc->alternate) {
5028 netp = asoc->alternate;
5030 netp = asoc->primary_destination;
5032 sctp_send_shutdown_ack(stcb, netp);
5033 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5034 stcb->sctp_ep, stcb, netp);
5039 * Now here we are going to recycle net_ack for a different use...
5042 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5047 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5048 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5049 * automatically ensure that.
5051 if ((asoc->sctp_cmt_on_off > 0) &&
5052 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5053 (cmt_dac_flag == 0)) {
5054 this_sack_lowest_newack = cum_ack;
5056 if ((num_seg > 0) || (num_nr_seg > 0)) {
5057 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5058 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5060 /* JRS - Use the congestion control given in the CC module */
5061 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5063 /* Now are we exiting loss recovery ? */
5064 if (will_exit_fast_recovery) {
5065 /* Ok, we must exit fast recovery */
5066 asoc->fast_retran_loss_recovery = 0;
5068 if ((asoc->sat_t3_loss_recovery) &&
5069 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5070 /* end satellite t3 loss recovery */
5071 asoc->sat_t3_loss_recovery = 0;
5076 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5077 if (net->will_exit_fast_recovery) {
5078 /* Ok, we must exit fast recovery */
5079 net->fast_retran_loss_recovery = 0;
5083 /* Adjust and set the new rwnd value */
5084 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5085 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5086 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5088 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5089 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5090 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5091 /* SWS sender side engages */
5092 asoc->peers_rwnd = 0;
5094 if (asoc->peers_rwnd > old_rwnd) {
5095 win_probe_recovery = 1;
5099 * Now we must setup so we have a timer up for anyone with
5105 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5106 if (win_probe_recovery && (net->window_probe)) {
5107 win_probe_recovered = 1;
5109 * Find first chunk that was used with
5110 * window probe and clear the event. Put
5111 * it back into the send queue as if has
5114 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5115 if (tp1->window_probe) {
5116 sctp_window_probe_recovery(stcb, asoc, tp1);
5121 if (net->flight_size) {
5123 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5124 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5125 stcb->sctp_ep, stcb, net);
5127 if (net->window_probe) {
5128 net->window_probe = 0;
5131 if (net->window_probe) {
5133 * In window probes we must assure a timer
5134 * is still running there
5136 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5137 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5138 stcb->sctp_ep, stcb, net);
5140 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5141 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5143 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5148 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5149 (asoc->sent_queue_retran_cnt == 0) &&
5150 (win_probe_recovered == 0) &&
5153 * huh, this should not happen unless all packets are
5154 * PR-SCTP and marked to skip of course.
5156 if (sctp_fs_audit(asoc)) {
5157 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5158 net->flight_size = 0;
5160 asoc->total_flight = 0;
5161 asoc->total_flight_count = 0;
5162 asoc->sent_queue_retran_cnt = 0;
5163 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5164 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5165 sctp_flight_size_increase(tp1);
5166 sctp_total_flight_increase(stcb, tp1);
5167 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5168 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5175 /*********************************************/
5176 /* Here we perform PR-SCTP procedures */
5178 /*********************************************/
5179 /* C1. update advancedPeerAckPoint */
5180 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5181 asoc->advanced_peer_ack_point = cum_ack;
5183 /* C2. try to further move advancedPeerAckPoint ahead */
5184 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5185 struct sctp_tmit_chunk *lchk;
5186 uint32_t old_adv_peer_ack_point;
5188 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5189 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5190 /* C3. See if we need to send a Fwd-TSN */
5191 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5193 * ISSUE with ECN, see FWD-TSN processing.
5195 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5196 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5197 0xee, cum_ack, asoc->advanced_peer_ack_point,
5198 old_adv_peer_ack_point);
5200 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5201 send_forward_tsn(stcb, asoc);
5203 /* try to FR fwd-tsn's that get lost too */
5204 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5205 send_forward_tsn(stcb, asoc);
5209 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5210 if (lchk->whoTo != NULL) {
5215 /* Assure a timer is up */
5216 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5217 stcb->sctp_ep, stcb, lchk->whoTo);
5220 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5221 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5223 stcb->asoc.peers_rwnd,
5224 stcb->asoc.total_flight,
5225 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a received SHUTDOWN chunk
 * by treating it like a SACK and handing it to the express SACK path.
 *
 * A SHUTDOWN chunk carries no advertised receiver window, so a_rwnd is
 * synthesized as peers_rwnd + total_flight: once the in-flight bytes
 * are acked and subtracted again by the SACK handler, the peer's rwnd
 * comes out unchanged (see the "Arrange so a_rwnd does NOT change"
 * note below).
 *
 * abort_flag is set by the express handler if the association must be
 * aborted (e.g. the cum-ack is bogus).
 */
5230 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5233 uint32_t cum_ack, a_rwnd;
5235 cum_ack = ntohl(cp->cumulative_tsn_ack);
5236 /* Arrange so a_rwnd does NOT change */
5237 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5239 /* Now call the express sack handling */
5240 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0)
/*
 * Kick the re-ordering queue of one inbound stream after the cumulative
 * ack point has been moved forward (called from FORWARD-TSN processing).
 *
 * Two passes over strmin->inqueue:
 *   1) Deliver everything at or before the stream's last_mid_delivered
 *      (SCTP_MID_GE test) — these were skipped over by the peer.
 *   2) Resume normal in-order delivery of consecutively numbered
 *      messages (SCTP_MID_EQ test), bumping last_mid_delivered as we go.
 *
 * Complete (NOT_FRAG) messages are unlinked from whichever stream queue
 * they sit on, stream accounting is decremented, and they are pushed to
 * the socket read queue.  Fragmented messages instead rewind
 * last_mid_delivered to just before their MID and set need_reasm_check,
 * so sctp_deliver_reasm_check() can try to complete/deliver them.
 *
 * NOTE(review): caller must hold the INP read lock — the deliveries use
 * SCTP_READ_LOCK_HELD; confirm against callers.
 */
5244 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5245 struct sctp_stream_in *strmin)
5247 struct sctp_queued_to_read *control, *ncontrol;
5248 struct sctp_association *asoc;
5250 int need_reasm_check = 0;
5253 mid = strmin->last_mid_delivered;
/* Pass 1: flush messages at or before the delivery point. */
5255 * First deliver anything prior to and including the stream no that
5258 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5259 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5260 /* this is deliverable now */
5261 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5262 if (control->on_strm_q) {
5263 if (control->on_strm_q == SCTP_ON_ORDERED) {
5264 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5265 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5266 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
/* Any other on_strm_q value indicates queue-state corruption. */
5269 panic("strmin: %p ctl: %p unknown %d",
5270 strmin, control, control->on_strm_q);
5273 control->on_strm_q = 0;
5275 /* subtract pending on streams */
/*
 * NOTE(review): the panic appears to be the INVARIANTS build path and
 * the clamp-to-0 the production path — #ifdef lines not visible here,
 * confirm in the full source.
 */
5276 if (asoc->size_on_all_streams >= control->length) {
5277 asoc->size_on_all_streams -= control->length;
5280 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5282 asoc->size_on_all_streams = 0;
5285 sctp_ucount_decr(asoc->cnt_on_all_streams);
5286 /* deliver it to at least the delivery-q */
5287 if (stcb->sctp_socket) {
5288 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5289 sctp_add_to_readq(stcb->sctp_ep, stcb,
5291 &stcb->sctp_socket->so_rcv,
5292 1, SCTP_READ_LOCK_HELD,
5293 SCTP_SO_NOT_LOCKED);
5296 /* Its a fragmented message */
5297 if (control->first_frag_seen) {
5299 * Make it so this is next to
5300 * deliver, we restore later
5302 strmin->last_mid_delivered = control->mid - 1;
5303 need_reasm_check = 1;
5308 /* no more delivery now. */
/* Let the reassembly code try to complete the rewound fragment. */
5312 if (need_reasm_check) {
5315 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5316 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5317 /* Restore the next to deliver unless we are ahead */
5318 strmin->last_mid_delivered = mid;
5321 /* Left the front Partial one on */
5324 need_reasm_check = 0;
/* Pass 2: resume strictly in-order delivery from the next MID. */
5327 * now we must deliver things in queue the normal way if any are
5330 mid = strmin->last_mid_delivered + 1;
5331 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5332 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5333 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5334 /* this is deliverable now */
5335 if (control->on_strm_q) {
5336 if (control->on_strm_q == SCTP_ON_ORDERED) {
5337 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5338 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5339 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5342 panic("strmin: %p ctl: %p unknown %d",
5343 strmin, control, control->on_strm_q);
5346 control->on_strm_q = 0;
5348 /* subtract pending on streams */
5349 if (asoc->size_on_all_streams >= control->length) {
5350 asoc->size_on_all_streams -= control->length;
5353 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5355 asoc->size_on_all_streams = 0;
5358 sctp_ucount_decr(asoc->cnt_on_all_streams);
5359 /* deliver it to at least the delivery-q */
5360 strmin->last_mid_delivered = control->mid;
5361 if (stcb->sctp_socket) {
5362 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5363 sctp_add_to_readq(stcb->sctp_ep, stcb,
5365 &stcb->sctp_socket->so_rcv, 1,
5366 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance to the next expected MID and keep scanning. */
5368 mid = strmin->last_mid_delivered + 1;
5370 /* Its a fragmented message */
5371 if (control->first_frag_seen) {
5373 * Make it so this is next to
5376 strmin->last_mid_delivered = control->mid - 1;
5377 need_reasm_check = 1;
5385 if (need_reasm_check) {
5386 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD)
/*
 * Flush one partially-reassembled message from a stream when a
 * FORWARD-TSN (cumtsn) skips past it.
 *
 * Drops every reassembly chunk covered by cumtsn (for pre-I-DATA
 * unordered data, only chunks with TSN <= cumtsn are purged),
 * decrements reassembly-queue accounting, and frees the chunk data.
 * If chunks beyond cumtsn survive, the control block is reset and, when
 * the surviving head is a FIRST_FRAG, reassembly is restarted from it.
 * Otherwise the control is unlinked from its ordered/unordered stream
 * queue and, if not also sitting on the socket read queue, fully freed.
 *
 * 'ordered' selects ordered vs. unordered handling; 'cumtsn' is the new
 * cumulative TSN from the FORWARD-TSN chunk.
 */
5391 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5392 struct sctp_association *asoc, struct sctp_stream_in *strm,
5393 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5395 struct sctp_tmit_chunk *chk, *nchk;
5398 * For now large messages held on the stream reasm that are complete
5399 * will be tossed too. We could in theory do more work to spin
5400 * through and stop after dumping one msg aka seeing the start of a
5401 * new msg at the head, and call the delivery function... to see if
5402 * it can be delivered... But for now we just dump everything on the
/*
 * Old-style (non-I-DATA) unordered data whose included FSN is already
 * past cumtsn needs no flushing at all — bail out early.
 */
5405 if (!asoc->idata_supported && !ordered &&
5406 control->first_frag_seen &&
5407 SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5410 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5411 /* Purge hanging chunks */
5412 if (!asoc->idata_supported && !ordered) {
/* Chunks beyond the new cum-ack survive the purge. */
5413 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5417 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/*
 * NOTE(review): panic vs. clamp-to-0 look like the INVARIANTS and
 * production paths respectively — #ifdef lines elided in this extract.
 */
5418 if (asoc->size_on_reasm_queue >= chk->send_size) {
5419 asoc->size_on_reasm_queue -= chk->send_size;
5422 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5424 asoc->size_on_reasm_queue = 0;
5427 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5429 sctp_m_freem(chk->data);
5432 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5434 if (!TAILQ_EMPTY(&control->reasm)) {
5435 /* This has to be old data, unordered */
5436 if (control->data) {
5437 sctp_m_freem(control->data);
5438 control->data = NULL;
/* Restart reassembly from the surviving chunks past cumtsn. */
5440 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5441 chk = TAILQ_FIRST(&control->reasm);
5442 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5443 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5444 sctp_add_chk_to_control(control, strm, stcb, asoc,
5445 chk, SCTP_READ_LOCK_HELD);
5447 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Nothing left to reassemble: unlink from its stream queue. */
5450 if (control->on_strm_q == SCTP_ON_ORDERED) {
5451 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5452 if (asoc->size_on_all_streams >= control->length) {
5453 asoc->size_on_all_streams -= control->length;
5456 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5458 asoc->size_on_all_streams = 0;
5461 sctp_ucount_decr(asoc->cnt_on_all_streams);
5462 control->on_strm_q = 0;
5463 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5464 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5465 control->on_strm_q = 0;
5467 } else if (control->on_strm_q) {
/* Any other on_strm_q value indicates queue-state corruption. */
5468 panic("strm: %p ctl: %p unknown %d",
5469 strm, control, control->on_strm_q);
5472 control->on_strm_q = 0;
/* Only free if the reader does not also hold it on the read queue. */
5473 if (control->on_read_q == 0) {
5474 sctp_free_remote_addr(control->whoFrom);
5475 if (control->data) {
5476 sctp_m_freem(control->data);
5477 control->data = NULL;
5479 sctp_free_a_readq(stcb, control)
5484 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5485 struct sctp_forward_tsn_chunk *fwd,
5486 int *abort_flag, struct mbuf *m, int offset)
5488 /* The pr-sctp fwd tsn */
5490 * here we will perform all the data receiver side steps for
5491 * processing FwdTSN, as required in by pr-sctp draft:
5493 * Assume we get FwdTSN(x):
5495 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5496 * + others we have 3) examine and update re-ordering queue on
5497 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5498 * report where we are.
5500 struct sctp_association *asoc;
5501 uint32_t new_cum_tsn, gap;
5502 unsigned int i, fwd_sz, m_size;
5504 struct sctp_stream_in *strm;
5505 struct sctp_queued_to_read *control, *sv;
5508 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5509 SCTPDBG(SCTP_DEBUG_INDATA1,
5510 "Bad size too small/big fwd-tsn\n");
5513 m_size = (stcb->asoc.mapping_array_size << 3);
5514 /*************************************************************/
5515 /* 1. Here we update local cumTSN and shift the bitmap array */
5516 /*************************************************************/
5517 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5519 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5520 /* Already got there ... */
5524 * now we know the new TSN is more advanced, let's find the actual
5527 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5528 asoc->cumulative_tsn = new_cum_tsn;
5529 if (gap >= m_size) {
5530 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5531 struct mbuf *op_err;
5532 char msg[SCTP_DIAG_INFO_LEN];
5535 * out of range (of single byte chunks in the rwnd I
5536 * give out). This must be an attacker.
5539 SCTP_SNPRINTF(msg, sizeof(msg),
5540 "New cum ack %8.8x too high, highest TSN %8.8x",
5541 new_cum_tsn, asoc->highest_tsn_inside_map);
5542 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5543 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5544 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5547 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5549 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5550 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5551 asoc->highest_tsn_inside_map = new_cum_tsn;
5553 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5554 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5556 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5557 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5560 SCTP_TCB_LOCK_ASSERT(stcb);
5561 for (i = 0; i <= gap; i++) {
5562 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5563 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5564 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5565 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5566 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5571 /*************************************************************/
5572 /* 2. Clear up re-assembly queue */
5573 /*************************************************************/
5575 /* This is now done as part of clearing up the stream/seq */
5576 if (asoc->idata_supported == 0) {
5579 /* Flush all the un-ordered data based on cum-tsn */
5580 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5581 for (sid = 0; sid < asoc->streamincnt; sid++) {
5582 strm = &asoc->strmin[sid];
5583 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5584 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5587 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5589 /*******************************************************/
5590 /* 3. Update the PR-stream re-ordering queues and fix */
5591 /* delivery issues as needed. */
5592 /*******************************************************/
5593 fwd_sz -= sizeof(*fwd);
5596 unsigned int num_str;
5599 uint16_t ordered, flags;
5600 struct sctp_strseq *stseq, strseqbuf;
5601 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5603 offset += sizeof(*fwd);
5605 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5606 if (asoc->idata_supported) {
5607 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5609 num_str = fwd_sz / sizeof(struct sctp_strseq);
5611 for (i = 0; i < num_str; i++) {
5612 if (asoc->idata_supported) {
5613 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5614 sizeof(struct sctp_strseq_mid),
5615 (uint8_t *)&strseqbuf_m);
5616 offset += sizeof(struct sctp_strseq_mid);
5617 if (stseq_m == NULL) {
5620 sid = ntohs(stseq_m->sid);
5621 mid = ntohl(stseq_m->mid);
5622 flags = ntohs(stseq_m->flags);
5623 if (flags & PR_SCTP_UNORDERED_FLAG) {
5629 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5630 sizeof(struct sctp_strseq),
5631 (uint8_t *)&strseqbuf);
5632 offset += sizeof(struct sctp_strseq);
5633 if (stseq == NULL) {
5636 sid = ntohs(stseq->sid);
5637 mid = (uint32_t)ntohs(stseq->ssn);
5645 * Ok we now look for the stream/seq on the read
5646 * queue where its not all delivered. If we find it
5647 * we transmute the read entry into a PDI_ABORTED.
5649 if (sid >= asoc->streamincnt) {
5650 /* screwed up streams, stop! */
5653 if ((asoc->str_of_pdapi == sid) &&
5654 (asoc->ssn_of_pdapi == mid)) {
5656 * If this is the one we were partially
5657 * delivering now then we no longer are.
5658 * Note this will change with the reassembly
5661 asoc->fragmented_delivery_inprogress = 0;
5663 strm = &asoc->strmin[sid];
5665 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
5666 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5667 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5671 if (asoc->idata_supported) {
5672 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
5673 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5674 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5678 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5679 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5683 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5684 if ((control->sinfo_stream == sid) &&
5685 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5686 str_seq = (sid << 16) | (0x0000ffff & mid);
5687 control->pdapi_aborted = 1;
5688 sv = stcb->asoc.control_pdapi;
5689 control->end_added = 1;
5690 if (control->on_strm_q == SCTP_ON_ORDERED) {
5691 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5692 if (asoc->size_on_all_streams >= control->length) {
5693 asoc->size_on_all_streams -= control->length;
5696 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5698 asoc->size_on_all_streams = 0;
5701 sctp_ucount_decr(asoc->cnt_on_all_streams);
5702 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5703 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5705 } else if (control->on_strm_q) {
5706 panic("strm: %p ctl: %p unknown %d",
5707 strm, control, control->on_strm_q);
5710 control->on_strm_q = 0;
5711 stcb->asoc.control_pdapi = control;
5712 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5714 SCTP_PARTIAL_DELIVERY_ABORTED,
5716 SCTP_SO_NOT_LOCKED);
5717 stcb->asoc.control_pdapi = sv;
5719 } else if ((control->sinfo_stream == sid) &&
5720 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5721 /* We are past our victim SSN */
5725 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5726 /* Update the sequence number */
5727 strm->last_mid_delivered = mid;
5729 /* now kick the stream the new way */
5730 /* sa_ignore NO_NULL_CHK */
5731 sctp_kick_prsctp_reorder_queue(stcb, strm);
5733 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5736 * Now slide thing forward.
5738 sctp_slide_mapping_arrays(stcb);