2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a sack into the chunk queue (if I have data to
58 * send, that is) and will be sending it .. for bundling.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: appends a reassembly chunk's data onto an existing
 * read-queue entry (definition appears later in this file).
 * NOTE(review): this extract has non-contiguous original line numbers, so
 * the storage class / return type line above is missing.
 */
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int hold_rlock);
/*
 * sctp_set_rwnd() - recompute the association's advertised receive window
 * via sctp_calc_rwnd() and cache it in asoc->my_rwnd.
 * NOTE(review): return type line and braces are missing from this extract;
 * code left byte-identical.
 */
72 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
77 /* Calculate what the rwnd would be */
/*
 * sctp_calc_rwnd() - derive the receive window from the socket receive
 * buffer space, minus bytes still held on the reassembly and per-stream
 * queues (plus MSIZE of mbuf overhead per queued chunk) and minus the
 * rwnd control overhead.
 * NOTE(review): this extract is missing lines (declaration of 'calc',
 * closing braces, return statements); code left byte-identical.
 */
79 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
84 * This is really set wrong with respect to a 1-2-m socket. Since
85 * the sb_cc is the count that everyone as put up. When we re-write
86 * sctp_soreceive then we will fix this so that ONLY this
87 * associations data is taken into account.
89 if (stcb->sctp_socket == NULL) {
/* Sanity: a zero chunk count must imply a zero byte total on each queue. */
93 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
94 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
95 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
96 ("size_on_all_streams is %u", asoc->size_on_all_streams));
97 if (stcb->asoc.sb_cc == 0 &&
98 asoc->cnt_on_reasm_queue == 0 &&
99 asoc->cnt_on_all_streams == 0) {
100 /* Full rwnd granted */
101 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
104 /* get actual space */
105 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 * take out what has NOT been put on socket queue and we yet hold
/* Charge both queued bytes and MSIZE of mbuf overhead per chunk. */
110 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
111 asoc->cnt_on_reasm_queue * MSIZE));
112 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
113 asoc->cnt_on_all_streams * MSIZE));
119 /* what is the overhead of all these rwnd's */
120 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 * If the window gets too small due to ctrl-stuff, reduce it to 1,
123 * even it is 0. SWS engaged
125 if (calc < stcb->asoc.my_rwnd_control_len) {
132 * Build out our readq entry based on the incoming packet.
/*
 * sctp_build_readq_entry() - allocate and initialize a sctp_queued_to_read
 * for an incoming message; takes a reference on 'net'.  Returns NULL when
 * allocation fails.
 * NOTE(review): the parameter list is truncated in this extract ('dm' is
 * assigned below but its declaration is not visible); code byte-identical.
 */
134 struct sctp_queued_to_read *
135 sctp_build_readq_entry(struct sctp_tcb *stcb,
136 struct sctp_nets *net,
137 uint32_t tsn, uint32_t ppid,
138 uint32_t context, uint16_t sid,
139 uint32_t mid, uint8_t flags,
142 struct sctp_queued_to_read *read_queue_e = NULL;
144 sctp_alloc_a_readq(stcb, read_queue_e);
145 if (read_queue_e == NULL) {
148 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
149 read_queue_e->sinfo_stream = sid;
/* Chunk flags ride in the upper byte of sinfo_flags. */
150 read_queue_e->sinfo_flags = (flags << 8);
151 read_queue_e->sinfo_ppid = ppid;
152 read_queue_e->sinfo_context = context;
153 read_queue_e->sinfo_tsn = tsn;
154 read_queue_e->sinfo_cumtsn = tsn;
155 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
156 read_queue_e->mid = mid;
/* 0xffffffff is the sentinel for "no fragment included yet". */
157 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
158 TAILQ_INIT(&read_queue_e->reasm);
159 read_queue_e->whoFrom = net;
160 atomic_add_int(&net->ref_count, 1);
161 read_queue_e->data = dm;
162 read_queue_e->stcb = stcb;
163 read_queue_e->port_from = stcb->rport;
/* Don't pin the stcb if the association is already being torn down. */
164 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
165 read_queue_e->do_not_ref_stcb = 1;
168 return (read_queue_e);
/*
 * sctp_build_ctl_nchunk() - build an mbuf of cmsg ancillary data
 * (SCTP_RCVINFO / SCTP_NXTINFO / SCTP_SNDRCV or SCTP_EXTRCV) according to
 * which socket features are enabled on the endpoint.
 * NOTE(review): this extract is missing lines (declarations of 'len',
 * 'cmh', 'ret', several braces and return statements); code byte-identical.
 */
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
174 struct sctp_extrcvinfo *seinfo;
175 struct sctp_sndrcvinfo *outinfo;
176 struct sctp_rcvinfo *rcvinfo;
177 struct sctp_nxtinfo *nxtinfo;
184 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 /* user does not want any ancillary data */
/* First pass: size the buffer for every cmsg we will emit. */
192 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
195 seinfo = (struct sctp_extrcvinfo *)sinfo;
196 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
199 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
206 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
209 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
215 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
220 SCTP_BUF_LEN(ret) = 0;
222 /* We need a CMSG header followed by the struct */
223 cmh = mtod(ret, struct cmsghdr *);
225 * Make sure that there is no un-initialized padding between the
226 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: fill in each enabled cmsg, advancing cmh as we go. */
229 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 cmh->cmsg_level = IPPROTO_SCTP;
231 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 cmh->cmsg_type = SCTP_RCVINFO;
233 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 rcvinfo->rcv_context = sinfo->sinfo_context;
241 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
246 cmh->cmsg_level = IPPROTO_SCTP;
247 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 cmh->cmsg_type = SCTP_NXTINFO;
249 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 nxtinfo->nxt_flags = 0;
252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 nxtinfo->nxt_flags |= SCTP_UNORDERED;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 nxtinfo->nxt_flags |= SCTP_COMPLETE;
261 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
267 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 cmh->cmsg_level = IPPROTO_SCTP;
269 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 cmh->cmsg_type = SCTP_EXTRCV;
273 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 cmh->cmsg_type = SCTP_SNDRCV;
279 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * sctp_mark_non_revokable() - move a TSN from the revokable mapping array
 * into the non-revokable (nr) mapping array, updating the highest-TSN
 * trackers.  No-op when the sctp_do_drain sysctl is 0 or the TSN is at or
 * below the cumulative ack.
 * NOTE(review): this extract is missing lines (returns, braces, an
 * INVARIANTS/#ifdef around the panic); code left byte-identical.
 */
286 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
288 uint32_t gap, i, cumackp1;
290 int in_r = 0, in_nr = 0;
292 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
295 cumackp1 = asoc->cumulative_tsn + 1;
296 if (SCTP_TSN_GT(cumackp1, tsn)) {
298 * this tsn is behind the cum ack and thus we don't need to
299 * worry about it being moved from one to the other.
303 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
304 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
305 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* TSN should be in at least one of the maps; otherwise state is corrupt. */
306 if ((in_r == 0) && (in_nr == 0)) {
308 panic("Things are really messed up now");
310 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
311 sctp_print_mapping_array(asoc);
315 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
318 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
319 asoc->highest_tsn_inside_nr_map = tsn;
321 if (tsn == asoc->highest_tsn_inside_map) {
322 /* We must back down to see what the new highest is */
323 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
324 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
325 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
326 asoc->highest_tsn_inside_map = i;
/* No TSN left in the revokable map: highest falls below the base TSN. */
332 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * sctp_place_control_in_stream() - insert a read-queue entry into the
 * stream's ordered or unordered in-queue, sorted by MID; a duplicate MID
 * is an error (the caller aborts the association on it).
 * NOTE(review): this extract is missing lines (return statements, the
 * branch selecting the ordered queue, braces); code left byte-identical.
 */
338 sctp_place_control_in_stream(struct sctp_stream_in *strm,
339 struct sctp_association *asoc,
340 struct sctp_queued_to_read *control)
342 struct sctp_queued_to_read *at;
343 struct sctp_readhead *q;
344 uint8_t flags, unordered;
/* Chunk flags live in the upper byte of sinfo_flags (set at build time). */
346 flags = (control->sinfo_flags >> 8);
347 unordered = flags & SCTP_DATA_UNORDERED;
349 q = &strm->uno_inqueue;
350 if (asoc->idata_supported == 0) {
351 if (!TAILQ_EMPTY(q)) {
353 * Only one stream can be here in old style
358 TAILQ_INSERT_TAIL(q, control, next_instrm);
359 control->on_strm_q = SCTP_ON_UNORDERED;
/* A complete (unfragmented) message is marked fully seen up front. */
365 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
366 control->end_added = 1;
367 control->first_frag_seen = 1;
368 control->last_frag_seen = 1;
370 if (TAILQ_EMPTY(q)) {
372 TAILQ_INSERT_HEAD(q, control, next_instrm);
374 control->on_strm_q = SCTP_ON_UNORDERED;
376 control->on_strm_q = SCTP_ON_ORDERED;
/* Walk the queue to keep it sorted by MID. */
380 TAILQ_FOREACH(at, q, next_instrm) {
381 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
383 * one in queue is bigger than the new one,
384 * insert before this one
386 TAILQ_INSERT_BEFORE(at, control, next_instrm);
388 control->on_strm_q = SCTP_ON_UNORDERED;
390 control->on_strm_q = SCTP_ON_ORDERED;
393 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
395 * Gak, He sent me a duplicate msg id
396 * number?? return -1 to abort.
400 if (TAILQ_NEXT(at, next_instrm) == NULL) {
402 * We are at the end, insert it
405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
406 sctp_log_strm_del(control, at,
407 SCTP_STR_LOG_FROM_INSERT_TL);
409 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
411 control->on_strm_q = SCTP_ON_UNORDERED;
413 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * sctp_abort_in_reasm() - free the offending chunk, build a protocol
 * violation cause describing the reassembly failure, and abort the whole
 * association.  'opspot' is folded into the diagnostic string to identify
 * the call site.
 * NOTE(review): this extract is missing lines (declaration of 'oper',
 * *abort_flag assignment, braces); code left byte-identical.
 */
424 sctp_abort_in_reasm(struct sctp_tcb *stcb,
425 struct sctp_queued_to_read *control,
426 struct sctp_tmit_chunk *chk,
427 int *abort_flag, int opspot)
429 char msg[SCTP_DIAG_INFO_LEN];
/* I-DATA uses 32-bit MID/FSN; old DATA reports 16-bit SSN fields. */
432 if (stcb->asoc.idata_supported) {
433 SCTP_SNPRINTF(msg, sizeof(msg),
434 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
436 control->fsn_included,
439 chk->rec.data.fsn, chk->rec.data.mid);
441 SCTP_SNPRINTF(msg, sizeof(msg),
442 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
444 control->fsn_included,
448 (uint16_t)chk->rec.data.mid);
450 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
451 sctp_m_freem(chk->data);
453 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
454 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
455 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * sctp_clean_up_control() - dispose of a read-queue entry that could not
 * be placed: free every chunk on its reasm list, drop the remote-address
 * reference, free any attached data, then free the entry itself.
 * NOTE(review): braces/NULL checks are missing from this extract; code
 * left byte-identical.
 */
460 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
463 * The control could not be placed and must be cleaned.
465 struct sctp_tmit_chunk *chk, *nchk;
467 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
468 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
470 sctp_m_freem(chk->data);
472 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
474 sctp_free_remote_addr(control->whoFrom);
476 sctp_m_freem(control->data);
477 control->data = NULL;
479 sctp_free_a_readq(stcb, control);
483 * Queue the chunk either right into the socket buffer if it is the next one
484 * to go OR put it in the correct place in the delivery queue. If we do
485 * append to the so_buf, keep doing so until we are out of order as
486 * long as the control's entered are non-fragmented.
/*
 * sctp_queue_data_to_stream() - deliver an ordered message directly to the
 * socket when it is the next expected MID (draining any now-in-order
 * followers), otherwise insert it into the stream queue via
 * sctp_place_control_in_stream(); a MID behind the delivery point or a
 * duplicate MID aborts the association.
 * NOTE(review): this extract is missing lines (declarations of 'nxt_todel'
 * and 'op_err', *abort_flag updates, INVARIANTS conditionals around the
 * panics, braces); code left byte-identical.
 */
489 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
490 struct sctp_association *asoc,
491 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
494 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
495 * all the data in one stream this could happen quite rapidly. One
496 * could use the TSN to keep track of things, but this scheme breaks
497 * down in the other type of stream usage that could occur. Send a
498 * single msg to stream 0, send 4Billion messages to stream 1, now
499 * send a message to stream 0. You have a situation where the TSN
500 * has wrapped but not in the stream. Is this worth worrying about
501 * or should we just change our queue sort at the bottom to be by
504 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
505 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
506 * assignment this could happen... and I don't see how this would be
507 * a violation. So for now I am undecided an will leave the sort by
508 * SSN alone. Maybe a hybred approach is the answer
511 struct sctp_queued_to_read *at;
515 struct sctp_stream_in *strm;
516 char msg[SCTP_DIAG_INFO_LEN];
518 strm = &asoc->strmin[control->sinfo_stream];
519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
520 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* MID behind the last delivered one: peer violated ordering - abort. */
522 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
523 /* The incoming sseq is behind where we last delivered? */
524 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
525 strm->last_mid_delivered, control->mid);
527 * throw it in the stream so it gets cleaned up in
528 * association destruction
530 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
531 if (asoc->idata_supported) {
532 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
533 strm->last_mid_delivered, control->sinfo_tsn,
534 control->sinfo_stream, control->mid);
536 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
537 (uint16_t)strm->last_mid_delivered,
539 control->sinfo_stream,
540 (uint16_t)control->mid);
542 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
543 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
544 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
549 asoc->size_on_all_streams += control->length;
550 sctp_ucount_incr(asoc->cnt_on_all_streams);
551 nxt_todel = strm->last_mid_delivered + 1;
552 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
553 /* can be delivered right away? */
554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
555 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
557 /* EY it wont be queued if it could be delivered directly */
/* Undo the size/count accounting added above before direct delivery. */
559 if (asoc->size_on_all_streams >= control->length) {
560 asoc->size_on_all_streams -= control->length;
563 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
565 asoc->size_on_all_streams = 0;
568 sctp_ucount_decr(asoc->cnt_on_all_streams);
569 strm->last_mid_delivered++;
570 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
571 sctp_add_to_readq(stcb->sctp_ep, stcb,
573 &stcb->sctp_socket->so_rcv, 1,
574 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain any queued messages that are now in order and unfragmented. */
575 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
577 nxt_todel = strm->last_mid_delivered + 1;
578 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
579 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
580 if (control->on_strm_q == SCTP_ON_ORDERED) {
581 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
582 if (asoc->size_on_all_streams >= control->length) {
583 asoc->size_on_all_streams -= control->length;
586 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
588 asoc->size_on_all_streams = 0;
591 sctp_ucount_decr(asoc->cnt_on_all_streams);
594 panic("Huh control: %p is on_strm_q: %d",
595 control, control->on_strm_q);
598 control->on_strm_q = 0;
599 strm->last_mid_delivered++;
601 * We ignore the return of deliver_data here
602 * since we always can hold the chunk on the
603 * d-queue. And we have a finite number that
604 * can be delivered from the strq.
606 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
607 sctp_log_strm_del(control, NULL,
608 SCTP_STR_LOG_FROM_IMMED_DEL);
610 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
611 sctp_add_to_readq(stcb->sctp_ep, stcb,
613 &stcb->sctp_socket->so_rcv, 1,
614 SCTP_READ_LOCK_NOT_HELD,
617 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
625 * Ok, we did not deliver this guy, find the correct place
626 * to put it on the queue.
/* Duplicate MID from the insert means protocol violation - abort. */
628 if (sctp_place_control_in_stream(strm, asoc, control)) {
629 SCTP_SNPRINTF(msg, sizeof(msg),
630 "Queue to str MID: %u duplicate", control->mid);
631 sctp_clean_up_control(stcb, control);
632 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
633 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
634 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * sctp_setup_tail_pointer() - walk control->data, pruning zero-length
 * mbufs, accumulating control->length, charging the socket buffer when
 * the entry is already on the read queue, and recording the tail mbuf.
 * NOTE(review): loop header and several braces are missing from this
 * extract; code left byte-identical.
 */
641 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
643 struct mbuf *m, *prev = NULL;
644 struct sctp_tcb *stcb;
646 stcb = control->stcb;
647 control->held_length = 0;
651 if (SCTP_BUF_LEN(m) == 0) {
652 /* Skip mbufs with NO length */
/* Freeing the head: control->data must follow the surviving chain. */
655 control->data = sctp_m_free(m);
658 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
659 m = SCTP_BUF_NEXT(prev);
662 control->tail_mbuf = prev;
667 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
668 if (control->on_read_q) {
670 * On read queue so we must increment the SB stuff,
671 * we assume caller has done any locks of SB.
673 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
675 m = SCTP_BUF_NEXT(m);
678 control->tail_mbuf = prev;
/*
 * sctp_add_to_tail_pointer() - append mbuf chain 'm' to the entry's data,
 * pruning zero-length mbufs, updating control->length, socket-buffer
 * accounting (when on the read queue) and *added with the bytes appended.
 * NOTE(review): loop header, several braces and an INVARIANTS conditional
 * around the panic are missing from this extract; code byte-identical.
 */
683 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
685 struct mbuf *prev = NULL;
686 struct sctp_tcb *stcb;
688 stcb = control->stcb;
691 panic("Control broken");
/* No tail yet: discard any stale data and rebuild via the setup path. */
696 if (control->tail_mbuf == NULL) {
698 sctp_m_freem(control->data);
700 sctp_setup_tail_pointer(control);
703 control->tail_mbuf->m_next = m;
705 if (SCTP_BUF_LEN(m) == 0) {
706 /* Skip mbufs with NO length */
709 control->tail_mbuf->m_next = sctp_m_free(m);
710 m = control->tail_mbuf->m_next;
712 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
713 m = SCTP_BUF_NEXT(prev);
716 control->tail_mbuf = prev;
721 if (control->on_read_q) {
723 * On read queue so we must increment the SB stuff,
724 * we assume caller has done any locks of SB.
726 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
728 *added += SCTP_BUF_LEN(m);
729 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
730 m = SCTP_BUF_NEXT(m);
733 control->tail_mbuf = prev;
/*
 * sctp_build_readq_entry_from_ctl() - initialize 'nc' as a fresh readq
 * entry cloned from 'control' (same stream/MID/sinfo/source), with an
 * empty reasm list and fsn_included reset to the "none yet" sentinel;
 * takes a reference on the shared whoFrom net.
 * NOTE(review): braces and possibly further field copies are missing from
 * this extract; code left byte-identical.
 */
738 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
740 memset(nc, 0, sizeof(struct sctp_queued_to_read));
741 nc->sinfo_stream = control->sinfo_stream;
742 nc->mid = control->mid;
743 TAILQ_INIT(&nc->reasm);
744 nc->top_fsn = control->top_fsn;
745 nc->mid = control->mid;
746 nc->sinfo_flags = control->sinfo_flags;
747 nc->sinfo_ppid = control->sinfo_ppid;
748 nc->sinfo_context = control->sinfo_context;
/* 0xffffffff = no fragment included yet (same sentinel as build_readq_entry). */
749 nc->fsn_included = 0xffffffff;
750 nc->sinfo_tsn = control->sinfo_tsn;
751 nc->sinfo_cumtsn = control->sinfo_cumtsn;
752 nc->sinfo_assoc_id = control->sinfo_assoc_id;
753 nc->whoFrom = control->whoFrom;
754 atomic_add_int(&nc->whoFrom->ref_count, 1);
755 nc->stcb = control->stcb;
756 nc->port_from = control->port_from;
757 nc->do_not_ref_stcb = control->do_not_ref_stcb;
/*
 * sctp_reset_a_control() - reset the entry's included-FSN to 'tsn' and,
 * if it was already queued to the endpoint's read queue, unlink it.
 * NOTE(review): braces are missing from this extract; code byte-identical.
 */
761 sctp_reset_a_control(struct sctp_queued_to_read *control,
762 struct sctp_inpcb *inp, uint32_t tsn)
764 control->fsn_included = tsn;
765 if (control->on_read_q) {
767 * We have to purge it from there, hopefully this will work
770 TAILQ_REMOVE(&inp->read_queue, control, next);
771 control->on_read_q = 0;
/*
 * sctp_handle_old_unordered_data() - reassembly service for pre-I-DATA
 * unordered data, where all fragments share MID 0.  Collapses consecutive
 * FSNs from the reasm list into the control, pushes a completed message
 * to the read queue, migrates any leftover fragments onto a freshly built
 * entry ('nc'), and starts partial delivery when the accumulated length
 * exceeds 'pd_point'.
 * NOTE(review): this extract is missing many lines (declarations of 'fsn'
 * and 'cnt_added', the 'pd_point' parameter line, loop/brace structure,
 * return statements); code left byte-identical.
 */
776 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
777 struct sctp_association *asoc,
778 struct sctp_stream_in *strm,
779 struct sctp_queued_to_read *control,
781 int inp_read_lock_held)
784 * Special handling for the old un-ordered data chunk. All the
785 * chunks/TSN's go to mid 0. So we have to do the old style watching
786 * to see if we have it all. If you return one, no other control
787 * entries on the un-ordered queue will be looked at. In theory
788 * there should be no others entries in reality, unless the guy is
789 * sending both unordered NDATA and unordered DATA...
791 struct sctp_tmit_chunk *chk, *lchk, *tchk;
793 struct sctp_queued_to_read *nc;
796 if (control->first_frag_seen == 0) {
797 /* Nothing we can do, we have not seen the first piece yet */
800 /* Collapse any we can */
803 fsn = control->fsn_included + 1;
804 /* Now what can we add? */
805 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
806 if (chk->rec.data.fsn == fsn) {
808 sctp_alloc_a_readq(stcb, nc);
812 memset(nc, 0, sizeof(struct sctp_queued_to_read));
813 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
814 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
818 if (control->end_added) {
820 if (!TAILQ_EMPTY(&control->reasm)) {
822 * Ok we have to move anything left
823 * on the control queue to a new
826 sctp_build_readq_entry_from_ctl(nc, control);
827 tchk = TAILQ_FIRST(&control->reasm);
/* Leftover FIRST fragment seeds the new entry's data/FSN/TSN. */
828 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
829 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
830 if (asoc->size_on_reasm_queue >= tchk->send_size) {
831 asoc->size_on_reasm_queue -= tchk->send_size;
834 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
836 asoc->size_on_reasm_queue = 0;
839 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
840 nc->first_frag_seen = 1;
841 nc->fsn_included = tchk->rec.data.fsn;
842 nc->data = tchk->data;
843 nc->sinfo_ppid = tchk->rec.data.ppid;
844 nc->sinfo_tsn = tchk->rec.data.tsn;
845 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
847 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
848 sctp_setup_tail_pointer(nc);
849 tchk = TAILQ_FIRST(&control->reasm);
851 /* Spin the rest onto the queue */
853 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
854 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
855 tchk = TAILQ_FIRST(&control->reasm);
858 * Now lets add it to the queue
859 * after removing control
861 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
862 nc->on_strm_q = SCTP_ON_UNORDERED;
863 if (control->on_strm_q) {
864 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
865 control->on_strm_q = 0;
868 if (control->pdapi_started) {
869 strm->pd_api_started = 0;
870 control->pdapi_started = 0;
872 if (control->on_strm_q) {
873 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
874 control->on_strm_q = 0;
875 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
877 if (control->on_read_q == 0) {
878 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
879 &stcb->sctp_socket->so_rcv, control->end_added,
880 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
882 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
883 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
885 * Switch to the new guy and
/* Unused new entry: release it (only if it never made a stream queue). */
891 if (nc->on_strm_q == 0) {
892 sctp_free_a_readq(stcb, nc);
897 sctp_free_a_readq(stcb, nc);
904 if (cnt_added && strm->pd_api_started) {
905 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/* Over the partial-delivery threshold: hand what we have to the reader. */
907 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
908 strm->pd_api_started = 1;
909 control->pdapi_started = 1;
910 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
911 &stcb->sctp_socket->so_rcv, control->end_added,
912 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
913 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * sctp_inject_old_unordered_data() - place one pre-I-DATA unordered
 * fragment into the control's reassembly state.  A FIRST fragment either
 * seeds the control's data or, if a FIRST was already seen, is validated
 * against the included FSN (swapping contents when the new FIRST is the
 * smaller); other fragments are inserted FSN-sorted into the reasm list.
 * Duplicate or inconsistent FSNs abort the association via
 * sctp_abort_in_reasm().
 * NOTE(review): this extract is missing lines (declarations of 'tdata',
 * 'tmp', 'inserted', the 'abort_flag' parameter, several braces/returns);
 * code left byte-identical.
 */
921 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
922 struct sctp_association *asoc,
923 struct sctp_queued_to_read *control,
924 struct sctp_tmit_chunk *chk,
927 struct sctp_tmit_chunk *at;
931 * Here we need to place the chunk into the control structure sorted
932 * in the correct order.
934 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
935 /* Its the very first one. */
936 SCTPDBG(SCTP_DEBUG_XXX,
937 "chunk is a first fsn: %u becomes fsn_included\n",
939 at = TAILQ_FIRST(&control->reasm);
940 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
942 * The first chunk in the reassembly is a smaller
943 * TSN than this one, even though this has a first,
944 * it must be from a subsequent msg.
948 if (control->first_frag_seen) {
950 * In old un-ordered we can reassembly on one
951 * control multiple messages. As long as the next
952 * FIRST is greater then the old first (TSN i.e. FSN
958 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
960 * Easy way the start of a new guy beyond
965 if ((chk->rec.data.fsn == control->fsn_included) ||
966 (control->pdapi_started)) {
968 * Ok this should not happen, if it does we
969 * started the pd-api on the higher TSN
970 * (since the equals part is a TSN failure
973 * We are completly hosed in that case since
974 * I have no way to recover. This really
975 * will only happen if we can get more TSN's
976 * higher before the pd-api-point.
978 sctp_abort_in_reasm(stcb, control, chk,
980 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
985 * Ok we have two firsts and the one we just got is
986 * smaller than the one we previously placed.. yuck!
987 * We must swap them out.
/* Swap data, FSN, TSN and PPID between control and chk. */
990 tdata = control->data;
991 control->data = chk->data;
993 /* Save the lengths */
994 chk->send_size = control->length;
995 /* Recompute length of control and tail pointer */
996 sctp_setup_tail_pointer(control);
997 /* Fix the FSN included */
998 tmp = control->fsn_included;
999 control->fsn_included = chk->rec.data.fsn;
1000 chk->rec.data.fsn = tmp;
1001 /* Fix the TSN included */
1002 tmp = control->sinfo_tsn;
1003 control->sinfo_tsn = chk->rec.data.tsn;
1004 chk->rec.data.tsn = tmp;
1005 /* Fix the PPID included */
1006 tmp = control->sinfo_ppid;
1007 control->sinfo_ppid = chk->rec.data.ppid;
1008 chk->rec.data.ppid = tmp;
1009 /* Fix tail pointer */
/* No FIRST seen before: this fragment seeds the control. */
1012 control->first_frag_seen = 1;
1013 control->fsn_included = chk->rec.data.fsn;
1014 control->top_fsn = chk->rec.data.fsn;
1015 control->sinfo_tsn = chk->rec.data.tsn;
1016 control->sinfo_ppid = chk->rec.data.ppid;
1017 control->data = chk->data;
1018 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1020 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1021 sctp_setup_tail_pointer(control);
/* Non-FIRST fragment: keep the reasm list sorted by FSN. */
1026 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1027 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1029 * This one in queue is bigger than the new one,
1030 * insert the new one before at.
1032 asoc->size_on_reasm_queue += chk->send_size;
1033 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1035 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1037 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1039 * They sent a duplicate fsn number. This really
1040 * should not happen since the FSN is a TSN and it
1041 * should have been dropped earlier.
1043 sctp_abort_in_reasm(stcb, control, chk,
1045 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1049 if (inserted == 0) {
1050 /* Its at the end */
1051 asoc->size_on_reasm_queue += chk->send_size;
1052 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1053 control->top_fsn = chk->rec.data.fsn;
1054 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * sctp_deliver_reasm_check() - scan a stream's unordered and ordered
 * queues for fragmented messages that are now deliverable, pushing
 * completed entries to the read queue and starting partial delivery when
 * an entry's length reaches pd_point (derived from the socket buffer
 * limit and the endpoint's partial_delivery_point).
 * NOTE(review): this function is truncated at the end of this extract and
 * is missing interior lines (declarations of 'pd_point'/'ret', loop
 * headers, INVARIANTS conditionals around the panics, braces); code left
 * byte-identical.
 */
1059 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1060 struct sctp_stream_in *strm, int inp_read_lock_held)
1063 * Given a stream, strm, see if any of the SSN's on it that are
1064 * fragmented are ready to deliver. If so go ahead and place them on
1065 * the read queue. In so placing if we have hit the end, then we
1066 * need to remove them from the stream's queue.
1068 struct sctp_queued_to_read *control, *nctl = NULL;
1069 uint32_t next_to_del;
1073 if (stcb->sctp_socket) {
1074 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1075 stcb->sctp_ep->partial_delivery_point);
1077 pd_point = stcb->sctp_ep->partial_delivery_point;
1079 control = TAILQ_FIRST(&strm->uno_inqueue);
1081 if ((control != NULL) &&
1082 (asoc->idata_supported == 0)) {
1083 /* Special handling needed for "old" data format */
1084 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1088 if (strm->pd_api_started) {
1089 /* Can't add more */
1093 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1094 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1095 nctl = TAILQ_NEXT(control, next_instrm);
1096 if (control->end_added) {
1097 /* We just put the last bit on */
1098 if (control->on_strm_q) {
1100 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1101 panic("Huh control: %p on_q: %d -- not unordered?",
1102 control, control->on_strm_q);
1105 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1106 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1107 if (asoc->size_on_all_streams >= control->length) {
1108 asoc->size_on_all_streams -= control->length;
1111 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1113 asoc->size_on_all_streams = 0;
1116 sctp_ucount_decr(asoc->cnt_on_all_streams);
1117 control->on_strm_q = 0;
1119 if (control->on_read_q == 0) {
1120 sctp_add_to_readq(stcb->sctp_ep, stcb,
1122 &stcb->sctp_socket->so_rcv, control->end_added,
1123 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1126 /* Can we do a PD-API for this un-ordered guy? */
1127 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1128 strm->pd_api_started = 1;
1129 control->pdapi_started = 1;
1130 sctp_add_to_readq(stcb->sctp_ep, stcb,
1132 &stcb->sctp_socket->so_rcv, control->end_added,
1133 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
/* Now the ordered in-queue. */
1141 control = TAILQ_FIRST(&strm->inqueue);
1142 if (strm->pd_api_started) {
1143 /* Can't add more */
1146 if (control == NULL) {
1149 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1151 * Ok the guy at the top was being partially delivered
1152 * completed, so we remove it. Note the pd_api flag was
1153 * taken off when the chunk was merged on in
1154 * sctp_queue_data_for_reasm below.
1156 nctl = TAILQ_NEXT(control, next_instrm);
1157 SCTPDBG(SCTP_DEBUG_XXX,
1158 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1159 control, control->end_added, control->mid,
1160 control->top_fsn, control->fsn_included,
1161 strm->last_mid_delivered);
1162 if (control->end_added) {
1163 if (control->on_strm_q) {
1165 if (control->on_strm_q != SCTP_ON_ORDERED) {
1166 panic("Huh control: %p on_q: %d -- not ordered?",
1167 control, control->on_strm_q);
1170 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1171 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1172 if (asoc->size_on_all_streams >= control->length) {
1173 asoc->size_on_all_streams -= control->length;
1176 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1178 asoc->size_on_all_streams = 0;
1181 sctp_ucount_decr(asoc->cnt_on_all_streams);
1182 control->on_strm_q = 0;
1184 if (strm->pd_api_started && control->pdapi_started) {
1185 control->pdapi_started = 0;
1186 strm->pd_api_started = 0;
1188 if (control->on_read_q == 0) {
1189 sctp_add_to_readq(stcb->sctp_ep, stcb,
1191 &stcb->sctp_socket->so_rcv, control->end_added,
1192 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1197 if (strm->pd_api_started) {
1199 * Can't add more must have gotten an un-ordered above being
1200 * partially delivered.
1205 next_to_del = strm->last_mid_delivered + 1;
1207 SCTPDBG(SCTP_DEBUG_XXX,
1208 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1209 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1211 nctl = TAILQ_NEXT(control, next_instrm);
1212 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1213 (control->first_frag_seen)) {
1216 /* Ok we can deliver it onto the stream. */
1217 if (control->end_added) {
1218 /* We are done with it afterwards */
1219 if (control->on_strm_q) {
1221 if (control->on_strm_q != SCTP_ON_ORDERED) {
1222 panic("Huh control: %p on_q: %d -- not ordered?",
1223 control, control->on_strm_q);
1226 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1227 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1228 if (asoc->size_on_all_streams >= control->length) {
1229 asoc->size_on_all_streams -= control->length;
1232 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1234 asoc->size_on_all_streams = 0;
1237 sctp_ucount_decr(asoc->cnt_on_all_streams);
1238 control->on_strm_q = 0;
1242 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1244 * A singleton now slipping through - mark
1245 * it non-revokable too
1247 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1248 } else if (control->end_added == 0) {
1250 * Check if we can defer adding until its
1253 if ((control->length < pd_point) || (strm->pd_api_started)) {
1255 * Don't need it or cannot add more
1256 * (one being delivered that way)
1261 done = (control->end_added) && (control->last_frag_seen);
1262 if (control->on_read_q == 0) {
1264 if (asoc->size_on_all_streams >= control->length) {
1265 asoc->size_on_all_streams -= control->length;
1268 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1270 asoc->size_on_all_streams = 0;
1273 strm->pd_api_started = 1;
1274 control->pdapi_started = 1;
1276 sctp_add_to_readq(stcb->sctp_ep, stcb,
1278 &stcb->sctp_socket->so_rcv, control->end_added,
1279 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1281 strm->last_mid_delivered = next_to_del;
/*
 * Merge the data of chunk |chk| onto the reassembly entry |control|,
 * update reassembly-queue accounting, and free the chunk wrapper.
 * |hold_rlock| != 0 means the caller already holds the INP read lock.
 * NOTE(review): this extract has source lines missing (the embedded
 * numbers are original line numbers); confirm control flow against the
 * full file before relying on brace structure shown here.
 */
1293 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1294 struct sctp_stream_in *strm,
1295 struct sctp_tcb *stcb, struct sctp_association *asoc,
1296 struct sctp_tmit_chunk *chk, int hold_rlock)
1299 * Given a control and a chunk, merge the data from the chk onto the
1300 * control and free up the chunk resources.
/*
 * If the control is already visible to the reader (PD-API delivery in
 * progress) and the caller does not hold the read lock, take it before
 * touching control->data.
 */
1305 if (control->on_read_q && (hold_rlock == 0)) {
1307 * Its being pd-api'd so we must do some locks.
1309 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/*
 * First data for this control: adopt the chunk's mbuf chain directly;
 * otherwise append it at the cached tail pointer.
 */
1312 if (control->data == NULL) {
1313 control->data = chk->data;
1314 sctp_setup_tail_pointer(control);
1316 sctp_add_to_tail_pointer(control, chk->data, &added);
/*
 * The chunk's data now lives in the control: record the highest
 * fragment sequence number merged in and drop the chunk from the
 * reassembly-queue byte/count accounting.
 */
1318 control->fsn_included = chk->rec.data.fsn;
1319 asoc->size_on_reasm_queue -= chk->send_size;
1320 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1321 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* The first fragment carries the message-level TSN and PPID. */
1323 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1324 control->first_frag_seen = 1;
1325 control->sinfo_tsn = chk->rec.data.tsn;
1326 control->sinfo_ppid = chk->rec.data.ppid;
/*
 * Last fragment: the message is complete.  If it was being partially
 * delivered, end the PD-API and pull the control off whichever stream
 * queue (ordered/unordered) it sits on.
 */
1328 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1330 if ((control->on_strm_q) && (control->on_read_q)) {
1331 if (control->pdapi_started) {
1332 control->pdapi_started = 0;
1333 strm->pd_api_started = 0;
1335 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1337 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1338 control->on_strm_q = 0;
1339 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1341 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1343 * Don't need to decrement
1344 * size_on_all_streams, since control is on
1347 sctp_ucount_decr(asoc->cnt_on_all_streams);
1348 control->on_strm_q = 0;
/* Any other on_strm_q value is a state-machine violation. */
1350 } else if (control->on_strm_q) {
1351 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1352 control->on_strm_q);
1356 control->end_added = 1;
1357 control->last_frag_seen = 1;
/*
 * Drop the read lock -- presumably under the same on_read_q/hold_rlock
 * guard as the lock above; the guard line is not visible in this
 * extract, confirm against the full source.
 */
1360 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1362 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1367 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1368 * queue, see if anything can be delivered. If so pull it off (or as much as
1369 * we can. If we run out of space then we must dump what we can and set the
1370 * appropriate flag to say we queued what we could.
/*
 * Queue data chunk |chk| into the reassembly state for |control| on its
 * stream, validating fragment-sequence (FSN) invariants; abort the
 * association via sctp_abort_in_reasm() on protocol violations.
 * |created_control| is set when the caller just allocated |control| for
 * this chunk; |tsn| is the chunk's TSN.  On completion, any now-in-order
 * fragments are merged into the control and it may be handed to the
 * socket read queue.
 * NOTE(review): this extract has source lines missing (the embedded
 * numbers are original line numbers); gaps hide braces/else arms.
 */
1373 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1374 struct sctp_queued_to_read *control,
1375 struct sctp_tmit_chunk *chk,
1376 int created_control,
1377 int *abort_flag, uint32_t tsn)
1380 struct sctp_tmit_chunk *at, *nat;
1381 struct sctp_stream_in *strm;
1382 int do_wakeup, unordered;
/* Resolve the inbound stream this message belongs to. */
1385 strm = &asoc->strmin[control->sinfo_stream];
1387 * For old un-ordered data chunks.
1389 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
/*
 * Freshly created control: account for it and insert it into the
 * stream queue; a duplicate SSN there is a protocol violation.
 */
1394 /* Must be added to the stream-in queue */
1395 if (created_control) {
1396 if ((unordered == 0) || (asoc->idata_supported)) {
1397 sctp_ucount_incr(asoc->cnt_on_all_streams);
1399 if (sctp_place_control_in_stream(strm, asoc, control)) {
1400 /* Duplicate SSN? */
1401 sctp_abort_in_reasm(stcb, control, chk,
1403 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1404 sctp_clean_up_control(stcb, control);
/*
 * Old DATA only: if this TSN is the next after the cum-ack, the chunk
 * must carry the B (first-fragment) bit, otherwise the sequence is
 * invalid and we abort.
 */
1407 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1409 * Ok we created this control and now lets validate
1410 * that its legal i.e. there is a B bit set, if not
1411 * and we have up to the cum-ack then its invalid.
1413 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1414 sctp_abort_in_reasm(stcb, control, chk,
1416 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old-format unordered data takes a special injection path. */
1421 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1422 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1426 * Ok we must queue the chunk into the reasembly portion: o if its
1427 * the first it goes to the control mbuf. o if its not first but the
1428 * next in sequence it goes to the control, and each succeeding one
1429 * in order also goes. o if its not in order we place it on the list
/*
 * First fragment: its data seeds the control's mbuf chain directly and
 * establishes sinfo_tsn/ppid and fsn_included.  A second FIRST for the
 * same message is a sender error -> abort.
 */
1432 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1433 /* Its the very first one. */
1434 SCTPDBG(SCTP_DEBUG_XXX,
1435 "chunk is a first fsn: %u becomes fsn_included\n",
1437 if (control->first_frag_seen) {
1439 * Error on senders part, they either sent us two
1440 * data chunks with FIRST, or they sent two
1441 * un-ordered chunks that were fragmented at the
1442 * same time in the same stream.
1444 sctp_abort_in_reasm(stcb, control, chk,
1446 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1449 control->first_frag_seen = 1;
1450 control->sinfo_ppid = chk->rec.data.ppid;
1451 control->sinfo_tsn = chk->rec.data.tsn;
1452 control->fsn_included = chk->rec.data.fsn;
1453 control->data = chk->data;
1454 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1456 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1457 sctp_setup_tail_pointer(control);
1458 asoc->size_on_all_streams += control->length;
/*
 * Middle/last fragment: validate its FSN against what we have already
 * merged (fsn_included) and the highest FSN seen (top_fsn), then
 * insert it FSN-ordered on control->reasm.
 */
1460 /* Place the chunk in our list */
1463 if (control->last_frag_seen == 0) {
1464 /* Still willing to raise highest FSN seen */
1465 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1466 SCTPDBG(SCTP_DEBUG_XXX,
1467 "We have a new top_fsn: %u\n",
1469 control->top_fsn = chk->rec.data.fsn;
1471 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1472 SCTPDBG(SCTP_DEBUG_XXX,
1473 "The last fsn is now in place fsn: %u\n",
1475 control->last_frag_seen = 1;
/* The LAST fragment must carry the highest FSN seen so far. */
1476 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1477 SCTPDBG(SCTP_DEBUG_XXX,
1478 "New fsn: %u is not at top_fsn: %u -- abort\n",
1481 sctp_abort_in_reasm(stcb, control, chk,
1483 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1487 if (asoc->idata_supported || control->first_frag_seen) {
1489 * For IDATA we always check since we know
1490 * that the first fragment is 0. For old
1491 * DATA we have to receive the first before
1492 * we know the first FSN (which is the TSN).
/* FSN at or below what is already merged -> duplicate, abort. */
1494 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1496 * We have already delivered up to
1499 sctp_abort_in_reasm(stcb, control, chk,
1501 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
/* A second LAST fragment after one was already seen -> abort. */
1506 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1507 /* Second last? huh? */
1508 SCTPDBG(SCTP_DEBUG_XXX,
1509 "Duplicate last fsn: %u (top: %u) -- abort\n",
1510 chk->rec.data.fsn, control->top_fsn);
1511 sctp_abort_in_reasm(stcb, control,
1513 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1516 if (asoc->idata_supported || control->first_frag_seen) {
1518 * For IDATA we always check since we know
1519 * that the first fragment is 0. For old
1520 * DATA we have to receive the first before
1521 * we know the first FSN (which is the TSN).
1524 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1526 * We have already delivered up to
1529 SCTPDBG(SCTP_DEBUG_XXX,
1530 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1531 chk->rec.data.fsn, control->fsn_included);
1532 sctp_abort_in_reasm(stcb, control, chk,
1534 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1539 * validate not beyond top FSN if we have seen last
/* Once LAST is seen, no fragment may exceed top_fsn. */
1542 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1543 SCTPDBG(SCTP_DEBUG_XXX,
1544 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1547 sctp_abort_in_reasm(stcb, control, chk,
1549 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1554 * If we reach here, we need to place the new chunk in the
1555 * reassembly for this control.
1557 SCTPDBG(SCTP_DEBUG_XXX,
1558 "chunk is a not first fsn: %u needs to be inserted\n",
/*
 * Walk the FSN-sorted reasm list: insert before the first entry with a
 * larger FSN; an equal FSN is a duplicate (abort); a LAST fragment
 * that would not land at the list tail is a violation (abort).
 */
1560 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1561 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1562 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1563 /* Last not at the end? huh? */
1564 SCTPDBG(SCTP_DEBUG_XXX,
1565 "Last fragment not last in list: -- abort\n");
1566 sctp_abort_in_reasm(stcb, control,
1568 SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1572 * This one in queue is bigger than the new
1573 * one, insert the new one before at.
1575 SCTPDBG(SCTP_DEBUG_XXX,
1576 "Insert it before fsn: %u\n",
1578 asoc->size_on_reasm_queue += chk->send_size;
1579 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1580 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1583 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1585 * Gak, He sent me a duplicate str seq
1589 * foo bar, I guess I will just free this
1590 * new guy, should we abort too? FIX ME
1591 * MAYBE? Or it COULD be that the SSN's have
1592 * wrapped. Maybe I should compare to TSN
1593 * somehow... sigh for now just blow away
1596 SCTPDBG(SCTP_DEBUG_XXX,
1597 "Duplicate to fsn: %u -- abort\n",
1599 sctp_abort_in_reasm(stcb, control,
1601 SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
/* No larger FSN found in the list: append at the tail. */
1605 if (inserted == 0) {
1606 /* Goes on the end */
1607 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1609 asoc->size_on_reasm_queue += chk->send_size;
1610 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1611 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1615 * Ok lets see if we can suck any up into the control structure that
1616 * are in seq if it makes sense.
1620 * If the first fragment has not been seen there is no sense in
/*
 * Merge pass: repeatedly pull the fragment whose FSN is exactly
 * fsn_included + 1 off the reasm list and fold it into the control via
 * sctp_add_chk_to_control().
 */
1623 if (control->first_frag_seen) {
1624 next_fsn = control->fsn_included + 1;
1625 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1626 if (at->rec.data.fsn == next_fsn) {
1627 /* We can add this one now to the control */
1628 SCTPDBG(SCTP_DEBUG_XXX,
1629 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1632 next_fsn, control->fsn_included);
1633 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1634 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1635 if (control->on_read_q) {
1639 * We only add to the
1640 * size-on-all-streams if its not on
1641 * the read q. The read q flag will
1642 * cause a sballoc so its accounted
1645 asoc->size_on_all_streams += lenadded;
/* Message completed while a PD-API was active: close out the PD-API. */
1648 if (control->end_added && control->pdapi_started) {
1649 if (strm->pd_api_started) {
1650 strm->pd_api_started = 0;
1651 control->pdapi_started = 0;
/* Hand the (now complete) control to the socket read queue. */
1653 if (control->on_read_q == 0) {
1654 sctp_add_to_readq(stcb->sctp_ep, stcb,
1656 &stcb->sctp_socket->so_rcv, control->end_added,
1657 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* Presumably guarded by do_wakeup (guard line not visible here). */
1667 /* Need to wakeup the reader */
1668 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Look up the reassembly control for message id |mid| on stream |strm|.
 * The ordered in-queue is searched by MID; for the unordered queue,
 * I-DATA entries are matched by MID while old DATA falls back to the
 * queue head.  NOTE(review): this extract has source lines missing
 * (returns/braces and the branch using |ordered| are not visible);
 * confirm control flow against the full file.
 */
1672 static struct sctp_queued_to_read *
1673 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1675 struct sctp_queued_to_read *control;
/* Scan the ordered in-queue for a control whose MID matches. */
1678 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1679 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/*
 * Unordered: with I-DATA each message has its own control keyed by
 * MID; with old DATA only the head of the unordered queue is taken.
 */
1684 if (idata_supported) {
1685 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1686 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1691 control = TAILQ_FIRST(&strm->uno_inqueue);
1698 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1699 struct mbuf **m, int offset, int chk_length,
1700 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1701 int *break_flag, int last_chunk, uint8_t chk_type)
1703 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1704 struct sctp_stream_in *strm;
1705 uint32_t tsn, fsn, gap, mid;
1708 int need_reasm_check = 0;
1710 struct mbuf *op_err;
1711 char msg[SCTP_DIAG_INFO_LEN];
1712 struct sctp_queued_to_read *control, *ncontrol;
1715 struct sctp_stream_reset_list *liste;
1718 int created_control = 0;
1720 if (chk_type == SCTP_IDATA) {
1721 struct sctp_idata_chunk *chunk, chunk_buf;
1723 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1724 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1725 chk_flags = chunk->ch.chunk_flags;
1726 clen = sizeof(struct sctp_idata_chunk);
1727 tsn = ntohl(chunk->dp.tsn);
1728 sid = ntohs(chunk->dp.sid);
1729 mid = ntohl(chunk->dp.mid);
1730 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1732 ppid = chunk->dp.ppid_fsn.ppid;
1734 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1735 ppid = 0xffffffff; /* Use as an invalid value. */
1738 struct sctp_data_chunk *chunk, chunk_buf;
1740 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1741 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1742 chk_flags = chunk->ch.chunk_flags;
1743 clen = sizeof(struct sctp_data_chunk);
1744 tsn = ntohl(chunk->dp.tsn);
1745 sid = ntohs(chunk->dp.sid);
1746 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1748 ppid = chunk->dp.ppid;
1750 if ((size_t)chk_length == clen) {
1752 * Need to send an abort since we had a empty data chunk.
1754 op_err = sctp_generate_no_user_data_cause(tsn);
1755 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1756 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1760 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1761 asoc->send_sack = 1;
1763 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1765 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1770 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1771 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1772 /* It is a duplicate */
1773 SCTP_STAT_INCR(sctps_recvdupdata);
1774 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1775 /* Record a dup for the next outbound sack */
1776 asoc->dup_tsns[asoc->numduptsns] = tsn;
1779 asoc->send_sack = 1;
1782 /* Calculate the number of TSN's between the base and this TSN */
1783 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1784 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1785 /* Can't hold the bit in the mapping at max array, toss it */
1788 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1789 SCTP_TCB_LOCK_ASSERT(stcb);
1790 if (sctp_expand_mapping_array(asoc, gap)) {
1791 /* Can't expand, drop it */
1795 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1798 /* See if we have received this one already */
1799 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1800 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1801 SCTP_STAT_INCR(sctps_recvdupdata);
1802 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1803 /* Record a dup for the next outbound sack */
1804 asoc->dup_tsns[asoc->numduptsns] = tsn;
1807 asoc->send_sack = 1;
1811 * Check to see about the GONE flag, duplicates would cause a sack
1812 * to be sent up above
1814 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1815 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1816 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1818 * wait a minute, this guy is gone, there is no longer a
1819 * receiver. Send peer an ABORT!
1821 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1822 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1827 * Now before going further we see if there is room. If NOT then we
1828 * MAY let one through only IF this TSN is the one we are waiting
1829 * for on a partial delivery API.
1832 /* Is the stream valid? */
1833 if (sid >= asoc->streamincnt) {
1834 struct sctp_error_invalid_stream *cause;
1836 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1837 0, M_NOWAIT, 1, MT_DATA);
1838 if (op_err != NULL) {
1839 /* add some space up front so prepend will work well */
1840 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1841 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1843 * Error causes are just param's and this one has
1844 * two back to back phdr, one with the error type
1845 * and size, the other with the streamid and a rsvd
1847 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1848 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1849 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1850 cause->stream_id = htons(sid);
1851 cause->reserved = htons(0);
1852 sctp_queue_op_err(stcb, op_err);
1854 SCTP_STAT_INCR(sctps_badsid);
1855 SCTP_TCB_LOCK_ASSERT(stcb);
1856 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1857 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1858 asoc->highest_tsn_inside_nr_map = tsn;
1860 if (tsn == (asoc->cumulative_tsn + 1)) {
1861 /* Update cum-ack */
1862 asoc->cumulative_tsn = tsn;
1867 * If its a fragmented message, lets see if we can find the control
1868 * on the reassembly queues.
1870 if ((chk_type == SCTP_IDATA) &&
1871 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1874 * The first *must* be fsn 0, and other (middle/end) pieces
1875 * can *not* be fsn 0. XXX: This can happen in case of a
1876 * wrap around. Ignore is for now.
1878 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1881 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1882 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1883 chk_flags, control);
1884 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1885 /* See if we can find the re-assembly entity */
1886 if (control != NULL) {
1887 /* We found something, does it belong? */
1888 if (ordered && (mid != control->mid)) {
1889 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1891 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1892 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1893 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1897 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1899 * We can't have a switched order with an
1902 SCTP_SNPRINTF(msg, sizeof(msg),
1903 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1907 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1909 * We can't have a switched unordered with a
1912 SCTP_SNPRINTF(msg, sizeof(msg),
1913 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1920 * Its a complete segment. Lets validate we don't have a
1921 * re-assembly going on with the same Stream/Seq (for
1922 * ordered) or in the same Stream for unordered.
1924 if (control != NULL) {
1925 if (ordered || asoc->idata_supported) {
1926 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1928 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1931 if ((tsn == control->fsn_included + 1) &&
1932 (control->end_added == 0)) {
1933 SCTP_SNPRINTF(msg, sizeof(msg),
1934 "Illegal message sequence, missing end for MID: %8.8x",
1935 control->fsn_included);
1943 /* now do the tests */
1944 if (((asoc->cnt_on_all_streams +
1945 asoc->cnt_on_reasm_queue +
1946 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1947 (((int)asoc->my_rwnd) <= 0)) {
1949 * When we have NO room in the rwnd we check to make sure
1950 * the reader is doing its job...
1952 if (stcb->sctp_socket->so_rcv.sb_cc) {
1953 /* some to read, wake-up */
1954 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1956 /* now is it in the mapping array of what we have accepted? */
1957 if (chk_type == SCTP_DATA) {
1958 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1959 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1960 /* Nope not in the valid range dump it */
1962 sctp_set_rwnd(stcb, asoc);
1963 if ((asoc->cnt_on_all_streams +
1964 asoc->cnt_on_reasm_queue +
1965 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1966 SCTP_STAT_INCR(sctps_datadropchklmt);
1968 SCTP_STAT_INCR(sctps_datadroprwnd);
1974 if (control == NULL) {
1977 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1982 #ifdef SCTP_ASOCLOG_OF_TSNS
1983 SCTP_TCB_LOCK_ASSERT(stcb);
1984 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1985 asoc->tsn_in_at = 0;
1986 asoc->tsn_in_wrapped = 1;
1988 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1989 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1990 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1991 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1992 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1993 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1994 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1995 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1999 * Before we continue lets validate that we are not being fooled by
2000 * an evil attacker. We can only have Nk chunks based on our TSN
2001 * spread allowed by the mapping array N * 8 bits, so there is no
2002 * way our stream sequence numbers could have wrapped. We of course
2003 * only validate the FIRST fragment so the bit must be set.
2005 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2006 (TAILQ_EMPTY(&asoc->resetHead)) &&
2007 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2008 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2009 /* The incoming sseq is behind where we last delivered? */
2010 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2011 mid, asoc->strmin[sid].last_mid_delivered);
2013 if (asoc->idata_supported) {
2014 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2015 asoc->strmin[sid].last_mid_delivered,
2020 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2021 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2026 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2027 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2028 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2032 if (chk_type == SCTP_IDATA) {
2033 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2035 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2037 if (last_chunk == 0) {
2038 if (chk_type == SCTP_IDATA) {
2039 dmbuf = SCTP_M_COPYM(*m,
2040 (offset + sizeof(struct sctp_idata_chunk)),
2043 dmbuf = SCTP_M_COPYM(*m,
2044 (offset + sizeof(struct sctp_data_chunk)),
2047 #ifdef SCTP_MBUF_LOGGING
2048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2049 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2053 /* We can steal the last chunk */
2057 /* lop off the top part */
2058 if (chk_type == SCTP_IDATA) {
2059 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2061 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2063 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2064 l_len = SCTP_BUF_LEN(dmbuf);
2067 * need to count up the size hopefully does not hit
2073 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2074 l_len += SCTP_BUF_LEN(lat);
2077 if (l_len > the_len) {
2078 /* Trim the end round bytes off too */
2079 m_adj(dmbuf, -(l_len - the_len));
2082 if (dmbuf == NULL) {
2083 SCTP_STAT_INCR(sctps_nomem);
2087 * Now no matter what, we need a control, get one if we don't have
2088 * one (we may have gotten it above when we found the message was
2091 if (control == NULL) {
2092 sctp_alloc_a_readq(stcb, control);
2093 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2098 if (control == NULL) {
2099 SCTP_STAT_INCR(sctps_nomem);
2102 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2105 control->data = dmbuf;
2106 control->tail_mbuf = NULL;
2107 for (mm = control->data; mm; mm = mm->m_next) {
2108 control->length += SCTP_BUF_LEN(mm);
2109 if (SCTP_BUF_NEXT(mm) == NULL) {
2110 control->tail_mbuf = mm;
2113 control->end_added = 1;
2114 control->last_frag_seen = 1;
2115 control->first_frag_seen = 1;
2116 control->fsn_included = fsn;
2117 control->top_fsn = fsn;
2119 created_control = 1;
2121 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2122 chk_flags, ordered, mid, control);
2123 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2124 TAILQ_EMPTY(&asoc->resetHead) &&
2126 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2127 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2128 /* Candidate for express delivery */
2130 * Its not fragmented, No PD-API is up, Nothing in the
2131 * delivery queue, Its un-ordered OR ordered and the next to
2132 * deliver AND nothing else is stuck on the stream queue,
2133 * And there is room for it in the socket buffer. Lets just
2134 * stuff it up the buffer....
2136 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2137 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2138 asoc->highest_tsn_inside_nr_map = tsn;
2140 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2143 sctp_add_to_readq(stcb->sctp_ep, stcb,
2144 control, &stcb->sctp_socket->so_rcv,
2145 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2147 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2148 /* for ordered, bump what we delivered */
2149 asoc->strmin[sid].last_mid_delivered++;
2151 SCTP_STAT_INCR(sctps_recvexpress);
2152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2153 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2154 SCTP_STR_LOG_FROM_EXPRS_DEL);
2157 goto finish_express_del;
2160 /* Now will we need a chunk too? */
2161 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2162 sctp_alloc_a_chunk(stcb, chk);
2164 /* No memory so we drop the chunk */
2165 SCTP_STAT_INCR(sctps_nomem);
2166 if (last_chunk == 0) {
2167 /* we copied it, free the copy */
2168 sctp_m_freem(dmbuf);
2172 chk->rec.data.tsn = tsn;
2173 chk->no_fr_allowed = 0;
2174 chk->rec.data.fsn = fsn;
2175 chk->rec.data.mid = mid;
2176 chk->rec.data.sid = sid;
2177 chk->rec.data.ppid = ppid;
2178 chk->rec.data.context = stcb->asoc.context;
2179 chk->rec.data.doing_fast_retransmit = 0;
2180 chk->rec.data.rcv_flags = chk_flags;
2182 chk->send_size = the_len;
2184 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2187 atomic_add_int(&net->ref_count, 1);
2190 /* Set the appropriate TSN mark */
2191 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2192 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2193 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2194 asoc->highest_tsn_inside_nr_map = tsn;
2197 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2198 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2199 asoc->highest_tsn_inside_map = tsn;
2202 /* Now is it complete (i.e. not fragmented)? */
2203 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2205 * Special check for when streams are resetting. We could be
2206 * more smart about this and check the actual stream to see
2207 * if it is not being reset.. that way we would not create a
2208 * HOLB when amongst streams being reset and those not being
2212 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2213 SCTP_TSN_GT(tsn, liste->tsn)) {
2215 * yep its past where we need to reset... go ahead
2218 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2220 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2222 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2223 unsigned char inserted = 0;
2225 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2226 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2230 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2235 if (inserted == 0) {
2237 * must be put at end, use prevP
2238 * (all setup from loop) to setup
2241 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2244 goto finish_express_del;
2246 if (chk_flags & SCTP_DATA_UNORDERED) {
2247 /* queue directly into socket buffer */
2248 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2250 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2251 sctp_add_to_readq(stcb->sctp_ep, stcb,
2253 &stcb->sctp_socket->so_rcv, 1,
2254 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2257 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2259 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2267 goto finish_express_del;
2269 /* If we reach here its a reassembly */
2270 need_reasm_check = 1;
2271 SCTPDBG(SCTP_DEBUG_XXX,
2272 "Queue data to stream for reasm control: %p MID: %u\n",
2274 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2277 * the assoc is now gone and chk was put onto the reasm
2278 * queue, which has all been freed.
2286 /* Here we tidy up things */
2287 if (tsn == (asoc->cumulative_tsn + 1)) {
2288 /* Update cum-ack */
2289 asoc->cumulative_tsn = tsn;
2295 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2297 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2299 SCTP_STAT_INCR(sctps_recvdata);
2300 /* Set it present please */
2301 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2302 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2304 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2305 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2306 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2308 if (need_reasm_check) {
2309 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2310 need_reasm_check = 0;
2312 /* check the special flag for stream resets */
2313 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2314 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2316 * we have finished working through the backlogged TSN's now
2317 * time to reset streams. 1: call reset function. 2: free
2318 * pending_reply space 3: distribute any chunks in
2319 * pending_reply_queue.
2321 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2322 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2323 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2324 SCTP_FREE(liste, SCTP_M_STRESET);
2325 /* sa_ignore FREED_MEMORY */
2326 liste = TAILQ_FIRST(&asoc->resetHead);
2327 if (TAILQ_EMPTY(&asoc->resetHead)) {
2328 /* All can be removed */
2329 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2330 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2331 strm = &asoc->strmin[control->sinfo_stream];
2332 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2336 if (need_reasm_check) {
2337 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2338 need_reasm_check = 0;
2342 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2343 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2347 * if control->sinfo_tsn is <= liste->tsn we
2348 * can process it which is the NOT of
2349 * control->sinfo_tsn > liste->tsn
2351 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2352 strm = &asoc->strmin[control->sinfo_stream];
2353 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2357 if (need_reasm_check) {
2358 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2359 need_reasm_check = 0;
/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits in 'val'
 * counting from the least-significant bit (0 for any even value, 8 for
 * 0xff).  sctp_slide_mapping_arrays() uses it to extend the cumulative
 * TSN point across the first partially-filled byte of the OR of
 * mapping_array and nr_mapping_array.
 *
 * Fix: the original extraction lost the closing "};" (the table ran
 * straight into the next function) and carried baked-in line numbers;
 * both are repaired here.  Table contents are unchanged.
 */
static const int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 8
};
/*
 * NOTE(review): this chunk is a lossy extraction -- every line carries a
 * baked-in original line number and many lines (braces, declarations such
 * as 'asoc = &stcb->asoc;', #ifdef INVARIANTS guards) are elided.  Code
 * lines are kept byte-identical; only review comments are added.
 *
 * sctp_slide_mapping_arrays(): recompute the cumulative TSN from the OR of
 * mapping_array and nr_mapping_array, then keep the bitmaps compact by
 * either clearing them outright (everything up to the highest TSN is
 * acked) or sliding their contents down so mapping_array_base_tsn catches
 * up with the cum-ack point.
 */
2403 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2406 * Now we also need to check the mapping array in a couple of ways.
2407 * 1) Did we move the cum-ack point?
2409 * When you first glance at this you might think that all entries
2410 * that make up the position of the cum-ack would be in the
2411 * nr-mapping array only.. i.e. things up to the cum-ack are always
2412 * deliverable. Thats true with one exception, when its a fragmented
2413 * message we may not deliver the data until some threshold (or all
2414 * of it) is in place. So we must OR the nr_mapping_array and
2415 * mapping_array to get a true picture of the cum-ack.
2417 struct sctp_association *asoc;
2420 int slide_from, slide_end, lgap, distance;
2421 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for SCTP_MAP_PREPARE_SLIDE logging below. */
2425 old_cumack = asoc->cumulative_tsn;
2426 old_base = asoc->mapping_array_base_tsn;
2427 old_highest = asoc->highest_tsn_inside_map;
2429 * We could probably improve this a small bit by calculating the
2430 * offset of the current cum-ack as the starting point.
/* Scan the OR of both bitmaps; 0xff bytes are fully in-sequence, and
 * sctp_map_lookup_tab gives the trailing run of 1-bits in the first
 * partial byte. */
2433 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2434 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2438 /* there is a 0 bit */
2439 at += sctp_map_lookup_tab[val];
/* 'at' is the count of in-sequence TSN bits; advance the cum-ack. */
2443 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: cum-ack must never pass both highest-TSN trackers.  Elided
 * lines presumably #ifdef INVARIANTS around the panic -- TODO confirm. */
2445 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2446 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2448 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2449 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2451 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2452 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2453 sctp_print_mapping_array(asoc);
2454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2455 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2457 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2458 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2461 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2462 highest_tsn = asoc->highest_tsn_inside_nr_map;
2464 highest_tsn = asoc->highest_tsn_inside_map;
/* Everything visible is acked: just zero both arrays and reset the base. */
2466 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2467 /* The complete array was completed by a single FR */
2468 /* highest becomes the cum-ack */
2474 /* clear the array */
2475 clr = ((at + 7) >> 3);
2476 if (clr > asoc->mapping_array_size) {
2477 clr = asoc->mapping_array_size;
2479 memset(asoc->mapping_array, 0, clr);
2480 memset(asoc->nr_mapping_array, 0, clr);
/* Post-condition check (elided guard, likely INVARIANTS): arrays clean. */
2482 for (i = 0; i < asoc->mapping_array_size; i++) {
2483 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2484 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2485 sctp_print_mapping_array(asoc);
2489 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2490 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2491 } else if (at >= 8) {
2492 /* we can slide the mapping array down */
2493 /* slide_from holds where we hit the first NON 0xff byte */
2496 * now calculate the ceiling of the move using our highest
2499 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2500 slide_end = (lgap >> 3);
2501 if (slide_end < slide_from) {
2502 sctp_print_mapping_array(asoc);
2504 panic("impossible slide");
2506 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2507 lgap, slide_end, slide_from, at);
2511 if (slide_end > asoc->mapping_array_size) {
2513 panic("would overrun buffer");
2515 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2516 asoc->mapping_array_size, slide_end);
2517 slide_end = asoc->mapping_array_size;
2520 distance = (slide_end - slide_from) + 1;
2521 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2522 sctp_log_map(old_base, old_cumack, old_highest,
2523 SCTP_MAP_PREPARE_SLIDE);
2524 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2525 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
/* If the slide would not fit, skip it and wait for more data to arrive. */
2527 if (distance + slide_from > asoc->mapping_array_size ||
2530 * Here we do NOT slide forward the array so that
2531 * hopefully when more data comes in to fill it up
2532 * we will be able to slide it forward. Really I
2533 * don't think this should happen :-0
2536 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2537 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2538 (uint32_t)asoc->mapping_array_size,
2539 SCTP_MAP_SLIDE_NONE);
/* Shift both arrays down by slide_from bytes, zero the vacated tail,
 * and advance the base TSN (and idle highest-TSN trackers) by
 * slide_from * 8 TSNs. */
2544 for (ii = 0; ii < distance; ii++) {
2545 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2546 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2548 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2549 asoc->mapping_array[ii] = 0;
2550 asoc->nr_mapping_array[ii] = 0;
2552 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2553 asoc->highest_tsn_inside_map += (slide_from << 3);
2555 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2556 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2558 asoc->mapping_array_base_tsn += (slide_from << 3);
2559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2560 sctp_log_map(asoc->mapping_array_base_tsn,
2561 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2562 SCTP_MAP_SLIDE_RESULT);
/*
 * NOTE(review): extraction residue -- baked-in line numbers and elided
 * lines (braces, 'asoc = &stcb->asoc;', etc.).  Code kept byte-identical.
 *
 * sctp_sack_check(): after sliding the mapping arrays, decide whether to
 * send a SACK right away or just arm the delayed-ACK timer.  'was_a_gap'
 * is the gap state before this packet; an immediate SACK is forced when a
 * gap just closed, when dup-TSNs or a gap exist, when delayed ack is off,
 * or when the packet-per-SACK limit is hit.  In SHUTDOWN-SENT the timer
 * is stopped and a SHUTDOWN plus SACK are sent instead.
 */
2569 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2571 struct sctp_association *asoc;
2572 uint32_t highest_tsn;
2575 sctp_slide_mapping_arrays(stcb);
2577 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2578 highest_tsn = asoc->highest_tsn_inside_nr_map;
2580 highest_tsn = asoc->highest_tsn_inside_map;
2582 /* Is there a gap now? */
2583 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2586 * Now we need to see if we need to queue a sack or just start the
2587 * timer (if allowed).
2589 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2591 * Ok special case, in SHUTDOWN-SENT case. here we maker
2592 * sure SACK timer is off and instead send a SHUTDOWN and a
2595 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2596 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2597 stcb->sctp_ep, stcb, NULL,
2598 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
/* SHUTDOWN goes to the alternate path if one is set, else the primary. */
2600 sctp_send_shutdown(stcb,
2601 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2603 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2607 * CMT DAC algorithm: increase number of packets received
2610 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any of the conditions below demands an immediate SACK ... */
2612 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2614 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2616 (stcb->asoc.numduptsns) || /* we have dup's */
2617 (is_a_gap) || /* is still a gap */
2618 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2619 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/* ... unless CMT's DAC variant allows delaying even a gap-driven ack. */
2621 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2622 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2623 (stcb->asoc.send_sack == 0) &&
2624 (stcb->asoc.numduptsns == 0) &&
2625 (stcb->asoc.delayed_ack) &&
2626 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2628 * CMT DAC algorithm: With CMT, delay acks
2629 * even in the face of
2631 * reordering. Therefore, if acks that do
2632 * not have to be sent because of the above
2633 * reasons, will be delayed. That is, acks
2634 * that would have been sent due to gap
2635 * reports will be delayed with DAC. Start
2636 * the delayed ack timer.
2638 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2639 stcb->sctp_ep, stcb, NULL);
2642 * Ok we must build a SACK since the timer
2643 * is pending, we got our first packet OR
2644 * there are gaps or duplicates.
2646 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2647 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2648 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise just make sure the delayed-ACK timer is running. */
2651 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2652 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2653 stcb->sctp_ep, stcb, NULL);
/*
 * NOTE(review): extraction residue -- baked-in line numbers and elided
 * lines (braces, several declarations/initializations, #endif markers).
 * Code kept byte-identical; only review comments added.
 *
 * sctp_process_data(): walk every chunk in the DATA region of a received
 * packet.  DATA/I-DATA chunks are validated (minimum length, and DATA vs
 * I-DATA must match the negotiated idata_supported setting -- a mismatch
 * aborts the association) and handed to sctp_process_a_data_chunk().
 * Known control chunk types seen here are tolerated or abort; unknown
 * types follow the chunk-type bit rules (0x40: queue an unrecognized-
 * chunk error report, 0x80 clear: stop processing the packet).  When any
 * data was received, liveness stats are refreshed and sctp_sack_check()
 * schedules or sends a SACK.
 */
2660 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2661 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2662 struct sctp_nets *net, uint32_t *high_tsn)
2664 struct sctp_chunkhdr *ch, chunk_buf;
2665 struct sctp_association *asoc;
2666 int num_chunks = 0; /* number of control chunks processed */
2668 int break_flag, last_chunk;
2669 int abort_flag = 0, was_a_gap;
2671 uint32_t highest_tsn;
2672 uint16_t chk_length;
2675 sctp_set_rwnd(stcb, &stcb->asoc);
2678 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed before this packet; sctp_sack_check()
 * compares it with the post-processing state. */
2680 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2681 highest_tsn = asoc->highest_tsn_inside_nr_map;
2683 highest_tsn = asoc->highest_tsn_inside_map;
2685 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2687 * setup where we got the last DATA packet from for any SACK that
2688 * may need to go out. Don't bump the net. This is done ONLY when a
2689 * chunk is assigned.
2691 asoc->last_data_chunk_from = net;
2694 * Now before we proceed we must figure out if this is a wasted
2695 * cluster... i.e. it is a small packet sent in and yet the driver
2696 * underneath allocated a full cluster for it. If so we must copy it
2697 * to a smaller mbuf and free up the cluster mbuf. This will help
2698 * with cluster starvation.
2700 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2701 /* we only handle mbufs that are singletons.. not chains */
2702 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2704 /* ok lets see if we can copy the data up */
2707 /* get the pointers and copy */
2708 to = mtod(m, caddr_t *);
2709 from = mtod((*mm), caddr_t *);
2710 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2711 /* copy the length and free up the old */
2712 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2714 /* success, back copy */
2717 /* We are in trouble in the mbuf world .. yikes */
2721 /* get pointer to the first chunk header */
2722 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2723 sizeof(struct sctp_chunkhdr),
2724 (uint8_t *)&chunk_buf);
2729 * process all DATA chunks...
2731 *high_tsn = asoc->cumulative_tsn;
2733 asoc->data_pkts_seen++;
2734 while (stop_proc == 0) {
2735 /* validate chunk length */
2736 chk_length = ntohs(ch->chunk_length);
2737 if (length - *offset < chk_length) {
2738 /* all done, mutulated chunk */
/* DATA vs I-DATA must match what was negotiated; otherwise abort. */
2742 if ((asoc->idata_supported == 1) &&
2743 (ch->chunk_type == SCTP_DATA)) {
2744 struct mbuf *op_err;
2745 char msg[SCTP_DIAG_INFO_LEN];
2747 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2748 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2749 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2750 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2753 if ((asoc->idata_supported == 0) &&
2754 (ch->chunk_type == SCTP_IDATA)) {
2755 struct mbuf *op_err;
2756 char msg[SCTP_DIAG_INFO_LEN];
2758 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2759 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2760 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2761 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2764 if ((ch->chunk_type == SCTP_DATA) ||
2765 (ch->chunk_type == SCTP_IDATA)) {
2768 if (ch->chunk_type == SCTP_DATA) {
2769 clen = sizeof(struct sctp_data_chunk);
2771 clen = sizeof(struct sctp_idata_chunk);
2773 if (chk_length < clen) {
2775 * Need to send an abort since we had a
2776 * invalid data chunk.
2778 struct mbuf *op_err;
2779 char msg[SCTP_DIAG_INFO_LEN];
2781 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2782 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2784 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2785 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2786 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2789 #ifdef SCTP_AUDITING_ENABLED
2790 sctp_audit_log(0xB1, 0);
2792 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2797 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2798 chk_length, net, high_tsn, &abort_flag, &break_flag,
2799 last_chunk, ch->chunk_type)) {
2807 * Set because of out of rwnd space and no
2808 * drop rep space left.
2814 /* not a data chunk in the data region */
2815 switch (ch->chunk_type) {
2816 case SCTP_INITIATION:
2817 case SCTP_INITIATION_ACK:
2818 case SCTP_SELECTIVE_ACK:
2819 case SCTP_NR_SELECTIVE_ACK:
2820 case SCTP_HEARTBEAT_REQUEST:
2821 case SCTP_HEARTBEAT_ACK:
2822 case SCTP_ABORT_ASSOCIATION:
2824 case SCTP_SHUTDOWN_ACK:
2825 case SCTP_OPERATION_ERROR:
2826 case SCTP_COOKIE_ECHO:
2827 case SCTP_COOKIE_ACK:
2830 case SCTP_SHUTDOWN_COMPLETE:
2831 case SCTP_AUTHENTICATION:
2832 case SCTP_ASCONF_ACK:
2833 case SCTP_PACKET_DROPPED:
2834 case SCTP_STREAM_RESET:
2835 case SCTP_FORWARD_CUM_TSN:
2839 * Now, what do we do with KNOWN
2840 * chunks that are NOT in the right
2843 * For now, I do nothing but ignore
2844 * them. We may later want to add
2845 * sysctl stuff to switch out and do
2846 * either an ABORT() or possibly
2849 struct mbuf *op_err;
2850 char msg[SCTP_DIAG_INFO_LEN];
2852 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2854 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2855 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2860 * Unknown chunk type: use bit rules after
2863 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2865 * Need to send an abort since we
2866 * had a invalid chunk.
2868 struct mbuf *op_err;
2869 char msg[SCTP_DIAG_INFO_LEN];
2871 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2872 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2873 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2874 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Chunk-type high bits: 0x40 set => report unrecognized chunk. */
2877 if (ch->chunk_type & 0x40) {
2878 /* Add a error report to the queue */
2879 struct mbuf *op_err;
2880 struct sctp_gen_error_cause *cause;
2882 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2883 0, M_NOWAIT, 1, MT_DATA);
2884 if (op_err != NULL) {
2885 cause = mtod(op_err, struct sctp_gen_error_cause *);
2886 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2887 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2888 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2889 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2890 if (SCTP_BUF_NEXT(op_err) != NULL) {
2891 sctp_queue_op_err(stcb, op_err);
2893 sctp_m_freem(op_err);
/* 0x80 clear => stop processing the rest of this packet. */
2897 if ((ch->chunk_type & 0x80) == 0) {
2898 /* discard the rest of this packet */
2900 } /* else skip this bad chunk and
2903 } /* switch of chunk type */
2905 *offset += SCTP_SIZE32(chk_length);
2906 if ((*offset >= length) || stop_proc) {
2907 /* no more data left in the mbuf chain */
2911 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2912 sizeof(struct sctp_chunkhdr),
2913 (uint8_t *)&chunk_buf);
2922 * we need to report rwnd overrun drops.
2924 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2928 * Did we get data, if so update the time for auto-close and
2929 * give peer credit for being alive.
2931 SCTP_STAT_INCR(sctps_recvpktwithdata);
2932 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2933 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2934 stcb->asoc.overall_error_count,
2936 SCTP_FROM_SCTP_INDATA,
2939 stcb->asoc.overall_error_count = 0;
2940 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2942 /* now service all of the reassm queue if needed */
2943 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2944 /* Assure that we ack right away */
2945 stcb->asoc.send_sack = 1;
2947 /* Start a sack timer or QUEUE a SACK for sending */
2948 sctp_sack_check(stcb, was_a_gap);
/*
 * NOTE(review): extraction residue -- baked-in line numbers and elided
 * lines (braces, some parameters, while-loop headers).  Code kept
 * byte-identical; only review comments added.
 *
 * sctp_process_segment_range(): process one gap-ack block of a SACK,
 * marking every sent_queue chunk whose TSN lies in
 * [last_tsn + frag_strt, last_tsn + frag_end] as acked by gap report.
 * The walk resumes from *p_tp1 and wraps around once ('circled') to cope
 * with out-of-order fragments.  Side effects per newly-acked chunk:
 * update biggest_newly_acked_tsn / this_sack_lowest_newack, CMT
 * (rtx-)pseudo-cumack trackers and saw_newack, flight-size and net_ack
 * accounting, RTO sampling, and RESEND-count bookkeeping; with
 * nr_sacking the chunk becomes NR_ACKED and its data is freed.  Returns
 * nonzero if a blocked sender should be woken (nr-sack only).
 */
2953 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2954 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2956 uint32_t *biggest_newly_acked_tsn,
2957 uint32_t *this_sack_lowest_newack,
2960 struct sctp_tmit_chunk *tp1;
2961 unsigned int theTSN;
2962 int j, wake_him = 0, circled = 0;
2964 /* Recover the tp1 we last saw */
2967 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2969 for (j = frag_strt; j <= frag_end; j++) {
2970 theTSN = j + last_tsn;
2972 if (tp1->rec.data.doing_fast_retransmit)
2976 * CMT: CUCv2 algorithm. For each TSN being
2977 * processed from the sent queue, track the
2978 * next expected pseudo-cumack, or
2979 * rtx_pseudo_cumack, if required. Separate
2980 * cumack trackers for first transmissions,
2981 * and retransmissions.
2983 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2984 (tp1->whoTo->find_pseudo_cumack == 1) &&
2985 (tp1->snd_count == 1)) {
2986 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2987 tp1->whoTo->find_pseudo_cumack = 0;
2989 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2990 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2991 (tp1->snd_count > 1)) {
2992 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2993 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk for this TSN of the gap block. */
2995 if (tp1->rec.data.tsn == theTSN) {
2996 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2998 * must be held until
3001 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3003 * If it is less than RESEND, it is
3004 * now no-longer in flight.
3005 * Higher values may already be set
3006 * via previous Gap Ack Blocks...
3007 * i.e. ACKED or RESEND.
3009 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3010 *biggest_newly_acked_tsn)) {
3011 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3014 * CMT: SFR algo (and HTNA) - set
3015 * saw_newack to 1 for dest being
3016 * newly acked. update
3017 * this_sack_highest_newack if
3020 if (tp1->rec.data.chunk_was_revoked == 0)
3021 tp1->whoTo->saw_newack = 1;
3023 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3024 tp1->whoTo->this_sack_highest_newack)) {
3025 tp1->whoTo->this_sack_highest_newack =
3029 * CMT DAC algo: also update
3030 * this_sack_lowest_newack
3032 if (*this_sack_lowest_newack == 0) {
3033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3034 sctp_log_sack(*this_sack_lowest_newack,
3039 SCTP_LOG_TSN_ACKED);
3041 *this_sack_lowest_newack = tp1->rec.data.tsn;
3044 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3045 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3046 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3047 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3048 * Separate pseudo_cumack trackers for first transmissions and
3051 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3052 if (tp1->rec.data.chunk_was_revoked == 0) {
3053 tp1->whoTo->new_pseudo_cumack = 1;
3055 tp1->whoTo->find_pseudo_cumack = 1;
3057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3058 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3060 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3061 if (tp1->rec.data.chunk_was_revoked == 0) {
3062 tp1->whoTo->new_pseudo_cumack = 1;
3064 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3066 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3067 sctp_log_sack(*biggest_newly_acked_tsn,
3072 SCTP_LOG_TSN_ACKED);
3074 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3075 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3076 tp1->whoTo->flight_size,
3078 (uint32_t)(uintptr_t)tp1->whoTo,
/* Newly acked: take the chunk out of flight and credit net_ack. */
3081 sctp_flight_size_decrease(tp1);
3082 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3083 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3086 sctp_total_flight_decrease(stcb, tp1);
3088 tp1->whoTo->net_ack += tp1->send_size;
3089 if (tp1->snd_count < 2) {
3091 * True non-retransmitted chunk
3093 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample only for first transmissions -- elided guard lines
 * presumably checked snd_count/rto_ok; TODO confirm. */
3100 sctp_calculate_rto(stcb,
3103 &tp1->sent_rcv_time,
3104 SCTP_RTT_FROM_DATA)) {
3107 if (tp1->whoTo->rto_needed == 0) {
3108 tp1->whoTo->rto_needed = 1;
3114 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3115 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3116 stcb->asoc.this_sack_highest_gap)) {
3117 stcb->asoc.this_sack_highest_gap =
3120 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3121 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3122 #ifdef SCTP_AUDITING_ENABLED
3123 sctp_audit_log(0xB2,
3124 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3129 * All chunks NOT UNSENT fall through here and are marked
3130 * (leave PR-SCTP ones that are to skip alone though)
3132 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3133 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3134 tp1->sent = SCTP_DATAGRAM_MARKED;
3136 if (tp1->rec.data.chunk_was_revoked) {
3137 /* deflate the cwnd */
3138 tp1->whoTo->cwnd -= tp1->book_size;
3139 tp1->rec.data.chunk_was_revoked = 0;
3141 /* NR Sack code here */
3143 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3144 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3145 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3148 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* Stream drained while a reset is pending: trigger the deferred reset. */
3151 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3152 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3153 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3154 stcb->asoc.trigger_reset = 1;
3156 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3162 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3163 sctp_m_freem(tp1->data);
3170 } /* if (tp1->tsn == theTSN) */
3171 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3174 tp1 = TAILQ_NEXT(tp1, sctp_next);
3175 if ((tp1 == NULL) && (circled == 0)) {
3177 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3179 } /* end while (tp1) */
3182 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3184 /* In case the fragments were not in order we must reset */
3185 } /* end for (j = fragStart */
3187 return (wake_him); /* Return value only used for nr-sack */
/*
 * NOTE(review): extraction residue -- baked-in line numbers and elided
 * lines (braces, loop-local setup).  Code kept byte-identical.
 *
 * sctp_handle_segments(): parse the num_seg revocable followed by
 * num_nr_seg non-revocable gap-ack blocks of a SACK from mbuf 'm' at
 * *offset.  Malformed blocks (start > end) are skipped; blocks that are
 * not in ascending order restart the sent_queue walk from the head.
 * Each valid block is handed to sctp_process_segment_range(), and
 * *biggest_tsn_acked tracks the highest TSN covered.  Returns nonzero if
 * any chunk's data was freed (nr-sack wake-up).
 */
3191 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3192 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3193 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3194 int num_seg, int num_nr_seg, int *rto_ok)
3196 struct sctp_gap_ack_block *frag, block;
3197 struct sctp_tmit_chunk *tp1;
3202 uint16_t frag_strt, frag_end, prev_frag_end;
3204 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3208 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3211 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap-ack block out of the SACK; bail if truncated. */
3213 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3214 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3215 *offset += sizeof(block);
3217 return (chunk_freed);
3219 frag_strt = ntohs(frag->start);
3220 frag_end = ntohs(frag->end);
3222 if (frag_strt > frag_end) {
3223 /* This gap report is malformed, skip it. */
3226 if (frag_strt <= prev_frag_end) {
3227 /* This gap report is not in order, so restart. */
3228 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3230 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3231 *biggest_tsn_acked = last_tsn + frag_end;
3238 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3239 non_revocable, &num_frs, biggest_newly_acked_tsn,
3240 this_sack_lowest_newack, rto_ok)) {
3243 prev_frag_end = frag_end;
3245 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3247 sctp_log_fr(*biggest_tsn_acked,
3248 *biggest_newly_acked_tsn,
3249 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3251 return (chunk_freed);
/*
 * NOTE(review): extraction residue -- baked-in line numbers and elided
 * lines (braces, log arguments, function tail).  Code kept byte-identical.
 *
 * sctp_check_for_revoked(): walk the sent_queue above the new cum-ack.
 * A chunk that was previously ACKED but is not covered by this SACK's
 * gap reports has been "revoked": return it to SENT, flag
 * chunk_was_revoked, and re-inflate flight size and cwnd so timers and
 * congestion accounting restart correctly.  A chunk MARKED by this SACK
 * is promoted to ACKED.  The scan stops past biggest_tsn_acked or at the
 * first UNSENT chunk.
 */
3255 sctp_check_for_revoked(struct sctp_tcb *stcb,
3256 struct sctp_association *asoc, uint32_t cumack,
3257 uint32_t biggest_tsn_acked)
3259 struct sctp_tmit_chunk *tp1;
3261 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3262 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3264 * ok this guy is either ACK or MARKED. If it is
3265 * ACKED it has been previously acked but not this
3266 * time i.e. revoked. If it is MARKED it was ACK'ed
3269 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3272 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3273 /* it has been revoked */
3274 tp1->sent = SCTP_DATAGRAM_SENT;
3275 tp1->rec.data.chunk_was_revoked = 1;
3277 * We must add this stuff back in to assure
3278 * timers and such get started.
3280 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3281 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3282 tp1->whoTo->flight_size,
3284 (uint32_t)(uintptr_t)tp1->whoTo,
3287 sctp_flight_size_increase(tp1);
3288 sctp_total_flight_increase(stcb, tp1);
3290 * We inflate the cwnd to compensate for our
3291 * artificial inflation of the flight_size.
3293 tp1->whoTo->cwnd += tp1->book_size;
3294 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3295 sctp_log_sack(asoc->last_acked_seq,
3300 SCTP_LOG_TSN_REVOKED);
3302 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3303 /* it has been re-acked in this SACK */
3304 tp1->sent = SCTP_DATAGRAM_ACKED;
3307 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3313 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3314 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3316 struct sctp_tmit_chunk *tp1;
3317 int strike_flag = 0;
3319 int tot_retrans = 0;
3320 uint32_t sending_seq;
3321 struct sctp_nets *net;
3322 int num_dests_sacked = 0;
3325 * select the sending_seq, this is either the next thing ready to be
3326 * sent but not transmitted, OR, the next seq we assign.
3328 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3330 sending_seq = asoc->sending_seq;
3332 sending_seq = tp1->rec.data.tsn;
3335 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3336 if ((asoc->sctp_cmt_on_off > 0) &&
3337 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3338 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3339 if (net->saw_newack)
3343 if (stcb->asoc.prsctp_supported) {
3344 (void)SCTP_GETTIME_TIMEVAL(&now);
3346 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3348 if (tp1->no_fr_allowed) {
3349 /* this one had a timeout or something */
3352 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3353 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3354 sctp_log_fr(biggest_tsn_newly_acked,
3357 SCTP_FR_LOG_CHECK_STRIKE);
3359 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3360 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3364 if (stcb->asoc.prsctp_supported) {
3365 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3366 /* Is it expired? */
3367 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3368 /* Yes so drop it */
3369 if (tp1->data != NULL) {
3370 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3371 SCTP_SO_NOT_LOCKED);
3377 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3378 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3379 /* we are beyond the tsn in the sack */
3382 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3383 /* either a RESEND, ACKED, or MARKED */
3385 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3386 /* Continue strikin FWD-TSN chunks */
3387 tp1->rec.data.fwd_tsn_cnt++;
3392 * CMT : SFR algo (covers part of DAC and HTNA as well)
3394 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3396 * No new acks were receieved for data sent to this
3397 * dest. Therefore, according to the SFR algo for
3398 * CMT, no data sent to this dest can be marked for
3399 * FR using this SACK.
3402 } else if (tp1->whoTo &&
3403 SCTP_TSN_GT(tp1->rec.data.tsn,
3404 tp1->whoTo->this_sack_highest_newack) &&
3405 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3407 * CMT: New acks were receieved for data sent to
3408 * this dest. But no new acks were seen for data
3409 * sent after tp1. Therefore, according to the SFR
3410 * algo for CMT, tp1 cannot be marked for FR using
3411 * this SACK. This step covers part of the DAC algo
3412 * and the HTNA algo as well.
3417 * Here we check to see if we were have already done a FR
3418 * and if so we see if the biggest TSN we saw in the sack is
3419 * smaller than the recovery point. If so we don't strike
3420 * the tsn... otherwise we CAN strike the TSN.
3423 * @@@ JRI: Check for CMT if (accum_moved &&
3424 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3427 if (accum_moved && asoc->fast_retran_loss_recovery) {
3429 * Strike the TSN if in fast-recovery and cum-ack
3432 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3433 sctp_log_fr(biggest_tsn_newly_acked,
3436 SCTP_FR_LOG_STRIKE_CHUNK);
3438 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3441 if ((asoc->sctp_cmt_on_off > 0) &&
3442 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3444 * CMT DAC algorithm: If SACK flag is set to
3445 * 0, then lowest_newack test will not pass
3446 * because it would have been set to the
3447 * cumack earlier. If not already to be
3448 * rtx'd, If not a mixed sack and if tp1 is
3449 * not between two sacked TSNs, then mark by
3450 * one more. NOTE that we are marking by one
3451 * additional time since the SACK DAC flag
3452 * indicates that two packets have been
3453 * received after this missing TSN.
3455 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3456 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3457 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3458 sctp_log_fr(16 + num_dests_sacked,
3461 SCTP_FR_LOG_STRIKE_CHUNK);
3466 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3467 (asoc->sctp_cmt_on_off == 0)) {
3469 * For those that have done a FR we must take
3470 * special consideration if we strike. I.e the
3471 * biggest_newly_acked must be higher than the
3472 * sending_seq at the time we did the FR.
3475 #ifdef SCTP_FR_TO_ALTERNATE
3477 * If FR's go to new networks, then we must only do
3478 * this for singly homed asoc's. However if the FR's
3479 * go to the same network (Armando's work) then its
3480 * ok to FR multiple times.
3487 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3488 tp1->rec.data.fast_retran_tsn)) {
3490 * Strike the TSN, since this ack is
3491 * beyond where things were when we
3494 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3495 sctp_log_fr(biggest_tsn_newly_acked,
3498 SCTP_FR_LOG_STRIKE_CHUNK);
3500 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3504 if ((asoc->sctp_cmt_on_off > 0) &&
3505 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3507 * CMT DAC algorithm: If
3508 * SACK flag is set to 0,
3509 * then lowest_newack test
3510 * will not pass because it
3511 * would have been set to
3512 * the cumack earlier. If
3513 * not already to be rtx'd,
3514 * If not a mixed sack and
3515 * if tp1 is not between two
3516 * sacked TSNs, then mark by
3517 * one more. NOTE that we
3518 * are marking by one
3519 * additional time since the
3520 * SACK DAC flag indicates
3521 * that two packets have
3522 * been received after this
3525 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3526 (num_dests_sacked == 1) &&
3527 SCTP_TSN_GT(this_sack_lowest_newack,
3528 tp1->rec.data.tsn)) {
3529 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3530 sctp_log_fr(32 + num_dests_sacked,
3533 SCTP_FR_LOG_STRIKE_CHUNK);
3535 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3543 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3546 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3547 biggest_tsn_newly_acked)) {
3549 * We don't strike these: This is the HTNA
3550 * algorithm i.e. we don't strike If our TSN is
3551 * larger than the Highest TSN Newly Acked.
3555 /* Strike the TSN */
3556 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3557 sctp_log_fr(biggest_tsn_newly_acked,
3560 SCTP_FR_LOG_STRIKE_CHUNK);
3562 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3565 if ((asoc->sctp_cmt_on_off > 0) &&
3566 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3568 * CMT DAC algorithm: If SACK flag is set to
3569 * 0, then lowest_newack test will not pass
3570 * because it would have been set to the
3571 * cumack earlier. If not already to be
3572 * rtx'd, If not a mixed sack and if tp1 is
3573 * not between two sacked TSNs, then mark by
3574 * one more. NOTE that we are marking by one
3575 * additional time since the SACK DAC flag
3576 * indicates that two packets have been
3577 * received after this missing TSN.
3579 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3580 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3582 sctp_log_fr(48 + num_dests_sacked,
3585 SCTP_FR_LOG_STRIKE_CHUNK);
3591 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3592 struct sctp_nets *alt;
3594 /* fix counts and things */
3595 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3596 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3597 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3599 (uint32_t)(uintptr_t)tp1->whoTo,
3603 tp1->whoTo->net_ack++;
3604 sctp_flight_size_decrease(tp1);
3605 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3606 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3611 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3612 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3613 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3615 /* add back to the rwnd */
3616 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3618 /* remove from the total flight */
3619 sctp_total_flight_decrease(stcb, tp1);
3621 if ((stcb->asoc.prsctp_supported) &&
3622 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3624 * Has it been retransmitted tv_sec times? -
3625 * we store the retran count there.
3627 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3628 /* Yes, so drop it */
3629 if (tp1->data != NULL) {
3630 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3631 SCTP_SO_NOT_LOCKED);
3633 /* Make sure to flag we had a FR */
3634 if (tp1->whoTo != NULL) {
3635 tp1->whoTo->net_ack++;
3641 * SCTP_PRINTF("OK, we are now ready to FR this
3644 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3645 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3649 /* This is a subsequent FR */
3650 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3652 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3653 if (asoc->sctp_cmt_on_off > 0) {
3655 * CMT: Using RTX_SSTHRESH policy for CMT.
3656 * If CMT is being used, then pick dest with
3657 * largest ssthresh for any retransmission.
3659 tp1->no_fr_allowed = 1;
3661 /* sa_ignore NO_NULL_CHK */
3662 if (asoc->sctp_cmt_pf > 0) {
3664 * JRS 5/18/07 - If CMT PF is on,
3665 * use the PF version of
3668 alt = sctp_find_alternate_net(stcb, alt, 2);
3671 * JRS 5/18/07 - If only CMT is on,
3672 * use the CMT version of
3675 /* sa_ignore NO_NULL_CHK */
3676 alt = sctp_find_alternate_net(stcb, alt, 1);
3682 * CUCv2: If a different dest is picked for
3683 * the retransmission, then new
3684 * (rtx-)pseudo_cumack needs to be tracked
3685 * for orig dest. Let CUCv2 track new (rtx-)
3686 * pseudo-cumack always.
3689 tp1->whoTo->find_pseudo_cumack = 1;
3690 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3693 } else { /* CMT is OFF */
3695 #ifdef SCTP_FR_TO_ALTERNATE
3696 /* Can we find an alternate? */
3697 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3700 * default behavior is to NOT retransmit
3701 * FR's to an alternate. Armando Caro's
3702 * paper details why.
3708 tp1->rec.data.doing_fast_retransmit = 1;
3710 /* mark the sending seq for possible subsequent FR's */
3712 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3713 * (uint32_t)tpi->rec.data.tsn);
3715 if (TAILQ_EMPTY(&asoc->send_queue)) {
3717 * If the queue of send is empty then its
3718 * the next sequence number that will be
3719 * assigned so we subtract one from this to
3720 * get the one we last sent.
3722 tp1->rec.data.fast_retran_tsn = sending_seq;
3725 * If there are chunks on the send queue
3726 * (unsent data that has made it from the
3727 * stream queues but not out the door, we
3728 * take the first one (which will have the
3729 * lowest TSN) and subtract one to get the
3732 struct sctp_tmit_chunk *ttt;
3734 ttt = TAILQ_FIRST(&asoc->send_queue);
3735 tp1->rec.data.fast_retran_tsn =
3741 * this guy had a RTO calculation pending on
3744 if ((tp1->whoTo != NULL) &&
3745 (tp1->whoTo->rto_needed == 0)) {
3746 tp1->whoTo->rto_needed = 1;
3750 if (alt != tp1->whoTo) {
3751 /* yes, there is an alternate. */
3752 sctp_free_remote_addr(tp1->whoTo);
3753 /* sa_ignore FREED_MEMORY */
3755 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP: scan the sent queue from its head and try to advance the
 * association's advanced_peer_ack_point past chunks that are marked
 * SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED, releasing PR-SCTP
 * chunks whose lifetime has expired along the way.  Returns the last
 * chunk the ack point was advanced over (NULL if no advancement
 * happened), which the caller uses to decide whether a FWD-TSN chunk
 * must be sent.
 *
 * NOTE(review): several lines of this routine are elided in this view;
 * the comments below describe only the visible logic.
 */
3761 struct sctp_tmit_chunk *
3762 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3763 struct sctp_association *asoc)
3765 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Without PR-SCTP support there is nothing to advance. */
3769 if (asoc->prsctp_supported == 0) {
/* Safe iteration: the current chunk may be released inside the loop. */
3772 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3773 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3774 tp1->sent != SCTP_DATAGRAM_RESEND &&
3775 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3776 /* no chance to advance, out of here */
3779 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3780 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3781 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3782 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3783 asoc->advanced_peer_ack_point,
3784 tp1->rec.data.tsn, 0, 0);
3787 if (!PR_SCTP_ENABLED(tp1->flags)) {
3789 * We can't fwd-tsn past any that are reliable aka
3790 * retransmitted until the asoc fails.
3795 (void)SCTP_GETTIME_TIMEVAL(&now);
3799 * now we got a chunk which is marked for another
3800 * retransmission to a PR-stream but has run out its chances
3801 * already maybe OR has been marked to skip now. Can we skip
3802 * it if it's a resend?
3804 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3805 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3807 * Now is this one marked for resend and its time is
/* Timed-reliability chunk whose lifetime expired: release it. */
3810 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3811 /* Yes so drop it */
3813 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3814 1, SCTP_SO_NOT_LOCKED);
3818 * No, we are done when hit one for resend
3819 * whose time has not expired.
3825 * Ok now if this chunk is marked to drop it we can clean up
3826 * the chunk, advance our peer ack point and we can check
/* Advance the ack point over skip-marked / NR-acked chunks. */
3829 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3830 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3831 /* advance PeerAckPoint goes forward */
3832 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3833 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3835 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3836 /* No update but we do save the chk */
3841 * If it is still in RESEND we can advance no
/*
 * Debug audit of flight-size bookkeeping: walk the sent queue and
 * classify each chunk by its ->sent state (in flight, marked for
 * resend, in between, acked, above acked), then check the result
 * against the association's cached total_flight/total_flight_count.
 * A mismatch triggers a panic (the surrounding conditional
 * compilation is elided in this view) or diagnostic printfs.
 *
 * NOTE(review): the return statements are elided here; the caller in
 * the express SACK path treats a non-zero result as "counters bad,
 * rebuild flight state" — confirm against the full source.
 */
3851 sctp_fs_audit(struct sctp_association *asoc)
3853 struct sctp_tmit_chunk *chk;
3854 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3857 int entry_flight, entry_cnt;
/* Snapshot the cached counters before walking the queue. */
3862 entry_flight = asoc->total_flight;
3863 entry_cnt = asoc->total_flight_count;
/* When everything outstanding is PR-SCTP, skip the audit. */
3865 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3868 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3869 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3870 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3875 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3877 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3879 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in flight (or in between) is a mismatch. */
3886 if ((inflight > 0) || (inbetween > 0)) {
3888 panic("Flight size-express incorrect? \n");
3890 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3891 entry_flight, entry_cnt);
3893 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3894 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a window probe once the peer's
 * receive window has reopened: clear the probe flag, shrink the
 * flight-size accounting, and mark the chunk SCTP_DATAGRAM_RESEND so
 * it gets retransmitted normally.  Chunks that are already acked (or
 * have no data) are only logged and are NOT moved back.
 *
 * NOTE(review): some lines (logging arguments, closing braces) are
 * elided in this view.
 */
3902 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3903 struct sctp_association *asoc,
3904 struct sctp_tmit_chunk *tp1)
3906 tp1->window_probe = 0;
3907 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3908 /* TSN's skipped we do NOT move back. */
3909 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3910 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3912 (uint32_t)(uintptr_t)tp1->whoTo,
3916 /* First setup this by shrinking flight */
/* Let the CC module account for the TSN leaving flight, if hooked. */
3917 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3918 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3921 sctp_flight_size_decrease(tp1);
3922 sctp_total_flight_decrease(stcb, tp1);
3923 /* Now mark for resend */
3924 tp1->sent = SCTP_DATAGRAM_RESEND;
3925 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3927 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3928 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3929 tp1->whoTo->flight_size,
3931 (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * Fast ("express") SACK path: process a SACK that carries only a new
 * cumulative ack, no gap-ack blocks.  Frees every chunk now covered
 * by the cum-ack, updates congestion control and the peer's rwnd,
 * recovers window-probe chunks, manages retransmission timers, runs
 * shutdown procedures once the queues drain, and performs the
 * PR-SCTP advanced-peer-ack-point procedures (FWD-TSN).
 *
 * NOTE(review): many lines of this routine are elided in this view;
 * the comments below describe only the visible logic.
 */
3937 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3938 uint32_t rwnd, int *abort_now, int ecne_seen)
3940 struct sctp_nets *net;
3941 struct sctp_association *asoc;
3942 struct sctp_tmit_chunk *tp1, *tp2;
3944 int win_probe_recovery = 0;
3945 int win_probe_recovered = 0;
3946 int j, done_once = 0;
3950 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3951 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3952 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3954 SCTP_TCB_LOCK_ASSERT(stcb);
3955 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Debug ring buffer of arriving cum-acks (compile-time option). */
3956 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3957 stcb->asoc.cumack_log_at++;
3958 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3959 stcb->asoc.cumack_log_at = 0;
/* Remember the rwnd before this SACK so we can detect it opening. */
3963 old_rwnd = asoc->peers_rwnd;
/* A cum-ack behind what we already acked is stale: ignore. */
3964 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3967 } else if (asoc->last_acked_seq == cumack) {
3968 /* Window update sack */
3969 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3970 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3971 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3972 /* SWS sender side engages */
3973 asoc->peers_rwnd = 0;
3975 if (asoc->peers_rwnd > old_rwnd) {
3981 /* First setup for CC stuff */
3982 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3983 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3984 /* Drag along the window_tsn for cwr's */
3985 net->cwr_window_tsn = cumack;
3987 net->prev_cwnd = net->cwnd;
3992 * CMT: Reset CUC and Fast recovery algo variables before
3995 net->new_pseudo_cumack = 0;
3996 net->will_exit_fast_recovery = 0;
3997 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3998 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity check: the cum-ack must be below the next TSN we would send. */
4001 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4002 tp1 = TAILQ_LAST(&asoc->sent_queue,
4003 sctpchunk_listhead);
4004 send_s = tp1->rec.data.tsn + 1;
4006 send_s = asoc->sending_seq;
4008 if (SCTP_TSN_GE(cumack, send_s)) {
4009 struct mbuf *op_err;
4010 char msg[SCTP_DIAG_INFO_LEN];
/* Peer acked data we never sent: protocol violation, abort. */
4014 SCTP_SNPRINTF(msg, sizeof(msg),
4015 "Cum ack %8.8x greater or equal than TSN %8.8x",
4017 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4018 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4019 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4022 asoc->this_sack_highest_gap = cumack;
4023 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4024 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4025 stcb->asoc.overall_error_count,
4027 SCTP_FROM_SCTP_INDATA,
/* Valid SACK: the association is making progress, clear errors. */
4030 stcb->asoc.overall_error_count = 0;
/* Phase: free every sent chunk now covered by the new cum-ack. */
4031 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4032 /* process the new consecutive TSN first */
4033 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4034 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4035 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4036 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4038 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4040 * If it is less than ACKED, it is
4041 * now no-longer in flight. Higher
4042 * values may occur during marking
4044 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4046 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4047 tp1->whoTo->flight_size,
4049 (uint32_t)(uintptr_t)tp1->whoTo,
4052 sctp_flight_size_decrease(tp1);
4053 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4054 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4057 /* sa_ignore NO_NULL_CHK */
4058 sctp_total_flight_decrease(stcb, tp1);
4060 tp1->whoTo->net_ack += tp1->send_size;
/* First transmission acked: unambiguous sample (Karn), maybe RTO. */
4061 if (tp1->snd_count < 2) {
4063 * True non-retransmitted
4066 tp1->whoTo->net_ack2 +=
4069 /* update RTO too? */
4072 sctp_calculate_rto(stcb,
4075 &tp1->sent_rcv_time,
4076 SCTP_RTT_FROM_DATA)) {
4079 if (tp1->whoTo->rto_needed == 0) {
4080 tp1->whoTo->rto_needed = 1;
4086 * CMT: CUCv2 algorithm. From the
4087 * cumack'd TSNs, for each TSN being
4088 * acked for the first time, set the
4089 * following variables for the
4090 * corresp destination.
4091 * new_pseudo_cumack will trigger a
4093 * find_(rtx_)pseudo_cumack will
4094 * trigger search for the next
4095 * expected (rtx-)pseudo-cumack.
4097 tp1->whoTo->new_pseudo_cumack = 1;
4098 tp1->whoTo->find_pseudo_cumack = 1;
4099 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4101 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4102 /* sa_ignore NO_NULL_CHK */
4103 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4106 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4107 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4109 if (tp1->rec.data.chunk_was_revoked) {
4110 /* deflate the cwnd */
4111 tp1->whoTo->cwnd -= tp1->book_size;
4112 tp1->rec.data.chunk_was_revoked = 0;
/* Per-stream bookkeeping; may arm a pending stream reset. */
4114 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4115 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4116 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4119 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4123 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4124 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4125 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4126 asoc->trigger_reset = 1;
/* Unlink and release the fully-acked chunk. */
4128 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4130 /* sa_ignore NO_NULL_CHK */
4131 sctp_free_bufspace(stcb, asoc, tp1, 1);
4132 sctp_m_freem(tp1->data);
4135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4136 sctp_log_sack(asoc->last_acked_seq,
4141 SCTP_LOG_FREE_SENT);
4143 asoc->sent_queue_cnt--;
4144 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Freed send-buffer space: wake any writer blocked on so_snd. */
4150 /* sa_ignore NO_NULL_CHK */
4151 if (stcb->sctp_socket) {
4152 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4153 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4154 /* sa_ignore NO_NULL_CHK */
4155 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4157 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4160 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4164 /* JRS - Use the congestion control given in the CC module */
4165 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4166 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4167 if (net->net_ack2 > 0) {
4169 * Karn's rule applies to clearing error
4170 * count, this is optional.
4172 net->error_count = 0;
4173 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4174 /* addr came good */
4175 net->dest_state |= SCTP_ADDR_REACHABLE;
4176 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4177 0, (void *)net, SCTP_SO_NOT_LOCKED);
4179 if (net == stcb->asoc.primary_destination) {
4180 if (stcb->asoc.alternate) {
4182 * release the alternate,
4185 sctp_free_remote_addr(stcb->asoc.alternate);
4186 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart heartbeats. */
4189 if (net->dest_state & SCTP_ADDR_PF) {
4190 net->dest_state &= ~SCTP_ADDR_PF;
4191 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4192 stcb->sctp_ep, stcb, net,
4193 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4194 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4195 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4196 /* Done with this net */
4199 /* restore any doubled timers */
4200 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4201 if (net->RTO < stcb->asoc.minrto) {
4202 net->RTO = stcb->asoc.minrto;
4204 if (net->RTO > stcb->asoc.maxrto) {
4205 net->RTO = stcb->asoc.maxrto;
4209 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4211 asoc->last_acked_seq = cumack;
/* Phase: resync flight state and the peer rwnd. */
4213 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4214 /* nothing left in-flight */
4215 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4216 net->flight_size = 0;
4217 net->partial_bytes_acked = 0;
4219 asoc->total_flight = 0;
4220 asoc->total_flight_count = 0;
4224 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4225 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4226 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4227 /* SWS sender side engages */
4228 asoc->peers_rwnd = 0;
4230 if (asoc->peers_rwnd > old_rwnd) {
4231 win_probe_recovery = 1;
4233 /* Now assure a timer where data is queued at */
4236 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4237 if (win_probe_recovery && (net->window_probe)) {
4238 win_probe_recovered = 1;
4240 * Find first chunk that was used with window probe
4241 * and clear the sent
4243 /* sa_ignore FREED_MEMORY */
4244 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4245 if (tp1->window_probe) {
4246 /* move back to data send queue */
4247 sctp_window_probe_recovery(stcb, asoc, tp1);
4252 if (net->flight_size) {
4254 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4255 if (net->window_probe) {
4256 net->window_probe = 0;
4259 if (net->window_probe) {
4261 * In window probes we must assure a timer
4262 * is still running there
4264 net->window_probe = 0;
4265 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4266 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4268 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4269 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4271 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
/* Flight counters disagree with the queue: audit and rebuild. */
4276 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4277 (asoc->sent_queue_retran_cnt == 0) &&
4278 (win_probe_recovered == 0) &&
4281 * huh, this should not happen unless all packets are
4282 * PR-SCTP and marked to skip of course.
4284 if (sctp_fs_audit(asoc)) {
4285 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4286 net->flight_size = 0;
4288 asoc->total_flight = 0;
4289 asoc->total_flight_count = 0;
4290 asoc->sent_queue_retran_cnt = 0;
4291 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4292 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4293 sctp_flight_size_increase(tp1);
4294 sctp_total_flight_increase(stcb, tp1);
4295 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4296 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4303 /**********************************/
4304 /* Now what about shutdown issues */
4305 /**********************************/
4306 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4307 /* nothing left on sendqueue.. consider done */
4309 if ((asoc->stream_queue_cnt == 1) &&
4310 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4311 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4312 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4313 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
/* Shutting down with a partially-delivered user message: abort. */
4315 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4316 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4317 (asoc->stream_queue_cnt == 1) &&
4318 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4319 struct mbuf *op_err;
4323 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4324 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4325 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* All data delivered while shutdown pending: send SHUTDOWN. */
4328 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4329 (asoc->stream_queue_cnt == 0)) {
4330 struct sctp_nets *netp;
4332 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4333 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4334 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4336 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4337 sctp_stop_timers_for_shutdown(stcb);
4338 if (asoc->alternate) {
4339 netp = asoc->alternate;
4341 netp = asoc->primary_destination;
4343 sctp_send_shutdown(stcb, netp);
4344 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4345 stcb->sctp_ep, stcb, netp);
4346 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4347 stcb->sctp_ep, stcb, NULL);
/* Peer initiated shutdown and we are drained: send SHUTDOWN-ACK. */
4348 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4349 (asoc->stream_queue_cnt == 0)) {
4350 struct sctp_nets *netp;
4352 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4353 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4354 sctp_stop_timers_for_shutdown(stcb);
4355 if (asoc->alternate) {
4356 netp = asoc->alternate;
4358 netp = asoc->primary_destination;
4360 sctp_send_shutdown_ack(stcb, netp);
4361 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4362 stcb->sctp_ep, stcb, netp);
4365 /*********************************************/
4366 /* Here we perform PR-SCTP procedures */
4368 /*********************************************/
4369 /* C1. update advancedPeerAckPoint */
4370 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4371 asoc->advanced_peer_ack_point = cumack;
4373 /* PR-Sctp issues need to be addressed too */
4374 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4375 struct sctp_tmit_chunk *lchk;
4376 uint32_t old_adv_peer_ack_point;
4378 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4379 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4380 /* C3. See if we need to send a Fwd-TSN */
4381 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4383 * ISSUE with ECN, see FWD-TSN processing.
4385 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4386 send_forward_tsn(stcb, asoc);
4388 /* try to FR fwd-tsn's that get lost too */
4389 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4390 send_forward_tsn(stcb, asoc);
4394 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4395 if (lchk->whoTo != NULL) {
4400 /* Assure a timer is up */
4401 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4402 stcb->sctp_ep, stcb, lchk->whoTo);
4405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4406 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4408 stcb->asoc.peers_rwnd,
4409 stcb->asoc.total_flight,
4410 stcb->asoc.total_output_queue_size);
4415 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4416 struct sctp_tcb *stcb,
4417 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4418 int *abort_now, uint8_t flags,
4419 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4421 struct sctp_association *asoc;
4422 struct sctp_tmit_chunk *tp1, *tp2;
4423 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4424 uint16_t wake_him = 0;
4425 uint32_t send_s = 0;
4427 int accum_moved = 0;
4428 int will_exit_fast_recovery = 0;
4429 uint32_t a_rwnd, old_rwnd;
4430 int win_probe_recovery = 0;
4431 int win_probe_recovered = 0;
4432 struct sctp_nets *net = NULL;
4435 uint8_t reneged_all = 0;
4436 uint8_t cmt_dac_flag;
4439 * we take any chance we can to service our queues since we cannot
4440 * get awoken when the socket is read from :<
4443 * Now perform the actual SACK handling: 1) Verify that it is not an
4444 * old sack, if so discard. 2) If there is nothing left in the send
4445 * queue (cum-ack is equal to last acked) then you have a duplicate
4446 * too, update any rwnd change and verify no timers are running.
4447 * then return. 3) Process any new consecutive data i.e. cum-ack
4448 * moved process these first and note that it moved. 4) Process any
4449 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4450 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4451 * sync up flightsizes and things, stop all timers and also check
4452 * for shutdown_pending state. If so then go ahead and send off the
4453 * shutdown. If in shutdown recv, send off the shutdown-ack and
4454 * start that timer, Ret. 9) Strike any non-acked things and do FR
4455 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4456 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4457 * if in shutdown_recv state.
4459 SCTP_TCB_LOCK_ASSERT(stcb);
4461 this_sack_lowest_newack = 0;
4462 SCTP_STAT_INCR(sctps_slowpath_sack);
4464 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4465 #ifdef SCTP_ASOCLOG_OF_TSNS
4466 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4467 stcb->asoc.cumack_log_at++;
4468 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4469 stcb->asoc.cumack_log_at = 0;
4474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4475 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4476 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4479 old_rwnd = stcb->asoc.peers_rwnd;
4480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4481 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4482 stcb->asoc.overall_error_count,
4484 SCTP_FROM_SCTP_INDATA,
4487 stcb->asoc.overall_error_count = 0;
4489 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4490 sctp_log_sack(asoc->last_acked_seq,
4497 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4499 uint32_t *dupdata, dblock;
4501 for (i = 0; i < num_dup; i++) {
4502 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4503 sizeof(uint32_t), (uint8_t *)&dblock);
4504 if (dupdata == NULL) {
4507 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4511 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4512 tp1 = TAILQ_LAST(&asoc->sent_queue,
4513 sctpchunk_listhead);
4514 send_s = tp1->rec.data.tsn + 1;
4517 send_s = asoc->sending_seq;
4519 if (SCTP_TSN_GE(cum_ack, send_s)) {
4520 struct mbuf *op_err;
4521 char msg[SCTP_DIAG_INFO_LEN];
4524 * no way, we have not even sent this TSN out yet. Peer is
4525 * hopelessly messed up with us.
4527 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4530 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4531 tp1->rec.data.tsn, (void *)tp1);
4536 SCTP_SNPRINTF(msg, sizeof(msg),
4537 "Cum ack %8.8x greater or equal than TSN %8.8x",
4539 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4540 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4541 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4544 /**********************/
4545 /* 1) check the range */
4546 /**********************/
4547 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4548 /* acking something behind */
4552 /* update the Rwnd of the peer */
4553 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4554 TAILQ_EMPTY(&asoc->send_queue) &&
4555 (asoc->stream_queue_cnt == 0)) {
4556 /* nothing left on send/sent and strmq */
4557 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4558 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4559 asoc->peers_rwnd, 0, 0, a_rwnd);
4561 asoc->peers_rwnd = a_rwnd;
4562 if (asoc->sent_queue_retran_cnt) {
4563 asoc->sent_queue_retran_cnt = 0;
4565 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4566 /* SWS sender side engages */
4567 asoc->peers_rwnd = 0;
4569 /* stop any timers */
4570 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4571 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4572 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4573 net->partial_bytes_acked = 0;
4574 net->flight_size = 0;
4576 asoc->total_flight = 0;
4577 asoc->total_flight_count = 0;
4581 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4582 * things. The total byte count acked is tracked in netAckSz AND
4583 * netAck2 is used to track the total bytes acked that are un-
4584 * ambiguous and were never retransmitted. We track these on a per
4585 * destination address basis.
4587 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4588 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4589 /* Drag along the window_tsn for cwr's */
4590 net->cwr_window_tsn = cum_ack;
4592 net->prev_cwnd = net->cwnd;
4597 * CMT: Reset CUC and Fast recovery algo variables before
4600 net->new_pseudo_cumack = 0;
4601 net->will_exit_fast_recovery = 0;
4602 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4603 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4607 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4608 * to be greater than the cumack. Also reset saw_newack to 0
4611 net->saw_newack = 0;
4612 net->this_sack_highest_newack = last_tsn;
4614 /* process the new consecutive TSN first */
4615 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4616 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4617 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4619 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4621 * If it is less than ACKED, it is
4622 * now no-longer in flight. Higher
4623 * values may occur during marking
4625 if ((tp1->whoTo->dest_state &
4626 SCTP_ADDR_UNCONFIRMED) &&
4627 (tp1->snd_count < 2)) {
4629 * If there was no retran
4630 * and the address is
4631 * un-confirmed and we sent
4633 * sacked.. its confirmed,
4636 tp1->whoTo->dest_state &=
4637 ~SCTP_ADDR_UNCONFIRMED;
4639 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4640 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4641 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4642 tp1->whoTo->flight_size,
4644 (uint32_t)(uintptr_t)tp1->whoTo,
4647 sctp_flight_size_decrease(tp1);
4648 sctp_total_flight_decrease(stcb, tp1);
4649 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4650 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4654 tp1->whoTo->net_ack += tp1->send_size;
4656 /* CMT SFR and DAC algos */
4657 this_sack_lowest_newack = tp1->rec.data.tsn;
4658 tp1->whoTo->saw_newack = 1;
4660 if (tp1->snd_count < 2) {
4662 * True non-retransmitted
4665 tp1->whoTo->net_ack2 +=
4668 /* update RTO too? */
4671 sctp_calculate_rto(stcb,
4674 &tp1->sent_rcv_time,
4675 SCTP_RTT_FROM_DATA)) {
4678 if (tp1->whoTo->rto_needed == 0) {
4679 tp1->whoTo->rto_needed = 1;
4685 * CMT: CUCv2 algorithm. From the
4686 * cumack'd TSNs, for each TSN being
4687 * acked for the first time, set the
4688 * following variables for the
4689 * corresp destination.
4690 * new_pseudo_cumack will trigger a
4692 * find_(rtx_)pseudo_cumack will
4693 * trigger search for the next
4694 * expected (rtx-)pseudo-cumack.
4696 tp1->whoTo->new_pseudo_cumack = 1;
4697 tp1->whoTo->find_pseudo_cumack = 1;
4698 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4700 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4701 sctp_log_sack(asoc->last_acked_seq,
4706 SCTP_LOG_TSN_ACKED);
4708 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4709 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4712 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4713 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4714 #ifdef SCTP_AUDITING_ENABLED
4715 sctp_audit_log(0xB3,
4716 (asoc->sent_queue_retran_cnt & 0x000000ff));
4719 if (tp1->rec.data.chunk_was_revoked) {
4720 /* deflate the cwnd */
4721 tp1->whoTo->cwnd -= tp1->book_size;
4722 tp1->rec.data.chunk_was_revoked = 0;
4724 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4725 tp1->sent = SCTP_DATAGRAM_ACKED;
4732 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4733 /* always set this up to cum-ack */
4734 asoc->this_sack_highest_gap = last_tsn;
4736 if ((num_seg > 0) || (num_nr_seg > 0)) {
4738 * thisSackHighestGap will increase while handling NEW
4739 * segments this_sack_highest_newack will increase while
4740 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4741 * used for CMT DAC algo. saw_newack will also change.
4743 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4744 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4745 num_seg, num_nr_seg, &rto_ok)) {
4749 * validate the biggest_tsn_acked in the gap acks if strict
4750 * adherence is wanted.
4752 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4754 * peer is either confused or we are under attack.
4757 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4758 biggest_tsn_acked, send_s);
4762 /*******************************************/
4763 /* cancel ALL T3-send timer if accum moved */
4764 /*******************************************/
4765 if (asoc->sctp_cmt_on_off > 0) {
4766 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4767 if (net->new_pseudo_cumack)
4768 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4770 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4774 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4775 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4776 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4780 /********************************************/
4781 /* drop the acked chunks from the sentqueue */
4782 /********************************************/
4783 asoc->last_acked_seq = cum_ack;
4785 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4786 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4789 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4790 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4791 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4794 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4798 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4799 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4800 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4801 asoc->trigger_reset = 1;
4803 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4804 if (PR_SCTP_ENABLED(tp1->flags)) {
4805 if (asoc->pr_sctp_cnt != 0)
4806 asoc->pr_sctp_cnt--;
4808 asoc->sent_queue_cnt--;
4810 /* sa_ignore NO_NULL_CHK */
4811 sctp_free_bufspace(stcb, asoc, tp1, 1);
4812 sctp_m_freem(tp1->data);
4814 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4815 asoc->sent_queue_cnt_removeable--;
4818 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4819 sctp_log_sack(asoc->last_acked_seq,
4824 SCTP_LOG_FREE_SENT);
4826 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4829 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4831 panic("Warning flight size is positive and should be 0");
4833 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4834 asoc->total_flight);
4836 asoc->total_flight = 0;
4839 /* sa_ignore NO_NULL_CHK */
4840 if ((wake_him) && (stcb->sctp_socket)) {
4841 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4842 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4843 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4845 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4848 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4852 if (asoc->fast_retran_loss_recovery && accum_moved) {
4853 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4854 /* Setup so we will exit RFC2582 fast recovery */
4855 will_exit_fast_recovery = 1;
4859 * Check for revoked fragments:
4861 * if Previous sack - Had no frags then we can't have any revoked if
4862 * Previous sack - Had frag's then - If we now have frags aka
4863 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4864 * some of them. else - The peer revoked all ACKED fragments, since
4865 * we had some before and now we have NONE.
4869 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4870 asoc->saw_sack_with_frags = 1;
4871 } else if (asoc->saw_sack_with_frags) {
4872 int cnt_revoked = 0;
4874 /* Peer revoked all dg's marked or acked */
4875 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4876 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4877 tp1->sent = SCTP_DATAGRAM_SENT;
4878 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4879 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4880 tp1->whoTo->flight_size,
4882 (uint32_t)(uintptr_t)tp1->whoTo,
4885 sctp_flight_size_increase(tp1);
4886 sctp_total_flight_increase(stcb, tp1);
4887 tp1->rec.data.chunk_was_revoked = 1;
4889 * To ensure that this increase in
4890 * flightsize, which is artificial, does not
4891 * throttle the sender, we also increase the
4892 * cwnd artificially.
4894 tp1->whoTo->cwnd += tp1->book_size;
4901 asoc->saw_sack_with_frags = 0;
4904 asoc->saw_sack_with_nr_frags = 1;
4906 asoc->saw_sack_with_nr_frags = 0;
4908 /* JRS - Use the congestion control given in the CC module */
4909 if (ecne_seen == 0) {
4910 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4911 if (net->net_ack2 > 0) {
4913 * Karn's rule applies to clearing error
4914 * count, this is optional.
4916 net->error_count = 0;
4917 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4918 /* addr came good */
4919 net->dest_state |= SCTP_ADDR_REACHABLE;
4920 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4921 0, (void *)net, SCTP_SO_NOT_LOCKED);
4924 if (net == stcb->asoc.primary_destination) {
4925 if (stcb->asoc.alternate) {
4927 * release the alternate,
4930 sctp_free_remote_addr(stcb->asoc.alternate);
4931 stcb->asoc.alternate = NULL;
4935 if (net->dest_state & SCTP_ADDR_PF) {
4936 net->dest_state &= ~SCTP_ADDR_PF;
4937 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4938 stcb->sctp_ep, stcb, net,
4939 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4940 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4941 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4942 /* Done with this net */
4945 /* restore any doubled timers */
4946 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4947 if (net->RTO < stcb->asoc.minrto) {
4948 net->RTO = stcb->asoc.minrto;
4950 if (net->RTO > stcb->asoc.maxrto) {
4951 net->RTO = stcb->asoc.maxrto;
4955 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4958 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4959 /* nothing left in-flight */
4960 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4961 /* stop all timers */
4962 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4964 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4965 net->flight_size = 0;
4966 net->partial_bytes_acked = 0;
4968 asoc->total_flight = 0;
4969 asoc->total_flight_count = 0;
4972 /**********************************/
4973 /* Now what about shutdown issues */
4974 /**********************************/
4975 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4976 /* nothing left on sendqueue.. consider done */
4977 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4978 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4979 asoc->peers_rwnd, 0, 0, a_rwnd);
4981 asoc->peers_rwnd = a_rwnd;
4982 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4983 /* SWS sender side engages */
4984 asoc->peers_rwnd = 0;
4987 if ((asoc->stream_queue_cnt == 1) &&
4988 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4989 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4990 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4991 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4993 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4994 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4995 (asoc->stream_queue_cnt == 1) &&
4996 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4997 struct mbuf *op_err;
5001 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5002 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5003 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5006 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5007 (asoc->stream_queue_cnt == 0)) {
5008 struct sctp_nets *netp;
5010 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5011 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5012 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5014 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5015 sctp_stop_timers_for_shutdown(stcb);
5016 if (asoc->alternate) {
5017 netp = asoc->alternate;
5019 netp = asoc->primary_destination;
5021 sctp_send_shutdown(stcb, netp);
5022 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5023 stcb->sctp_ep, stcb, netp);
5024 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5025 stcb->sctp_ep, stcb, NULL);
5027 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5028 (asoc->stream_queue_cnt == 0)) {
5029 struct sctp_nets *netp;
5031 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5032 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5033 sctp_stop_timers_for_shutdown(stcb);
5034 if (asoc->alternate) {
5035 netp = asoc->alternate;
5037 netp = asoc->primary_destination;
5039 sctp_send_shutdown_ack(stcb, netp);
5040 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5041 stcb->sctp_ep, stcb, netp);
5046 * Now here we are going to recycle net_ack for a different use...
5049 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5054 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5055 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5056 * automatically ensure that.
5058 if ((asoc->sctp_cmt_on_off > 0) &&
5059 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5060 (cmt_dac_flag == 0)) {
5061 this_sack_lowest_newack = cum_ack;
5063 if ((num_seg > 0) || (num_nr_seg > 0)) {
5064 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5065 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5067 /* JRS - Use the congestion control given in the CC module */
5068 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5070 /* Now are we exiting loss recovery ? */
5071 if (will_exit_fast_recovery) {
5072 /* Ok, we must exit fast recovery */
5073 asoc->fast_retran_loss_recovery = 0;
5075 if ((asoc->sat_t3_loss_recovery) &&
5076 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5077 /* end satellite t3 loss recovery */
5078 asoc->sat_t3_loss_recovery = 0;
5083 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5084 if (net->will_exit_fast_recovery) {
5085 /* Ok, we must exit fast recovery */
5086 net->fast_retran_loss_recovery = 0;
5090 /* Adjust and set the new rwnd value */
5091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5092 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5093 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5095 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5096 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5097 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5098 /* SWS sender side engages */
5099 asoc->peers_rwnd = 0;
5101 if (asoc->peers_rwnd > old_rwnd) {
5102 win_probe_recovery = 1;
5106 * Now we must setup so we have a timer up for anyone with
5112 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5113 if (win_probe_recovery && (net->window_probe)) {
5114 win_probe_recovered = 1;
5116 * Find first chunk that was used with
5117 * window probe and clear the event. Put
5118 * it back into the send queue as if has
5121 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5122 if (tp1->window_probe) {
5123 sctp_window_probe_recovery(stcb, asoc, tp1);
5128 if (net->flight_size) {
5130 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5131 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5132 stcb->sctp_ep, stcb, net);
5134 if (net->window_probe) {
5135 net->window_probe = 0;
5138 if (net->window_probe) {
5140 * In window probes we must assure a timer
5141 * is still running there
5143 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5144 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5145 stcb->sctp_ep, stcb, net);
5147 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5148 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5150 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5155 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5156 (asoc->sent_queue_retran_cnt == 0) &&
5157 (win_probe_recovered == 0) &&
5160 * huh, this should not happen unless all packets are
5161 * PR-SCTP and marked to skip of course.
5163 if (sctp_fs_audit(asoc)) {
5164 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5165 net->flight_size = 0;
5167 asoc->total_flight = 0;
5168 asoc->total_flight_count = 0;
5169 asoc->sent_queue_retran_cnt = 0;
5170 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5171 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5172 sctp_flight_size_increase(tp1);
5173 sctp_total_flight_increase(stcb, tp1);
5174 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5175 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5182 /*********************************************/
5183 /* Here we perform PR-SCTP procedures */
5185 /*********************************************/
5186 /* C1. update advancedPeerAckPoint */
5187 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5188 asoc->advanced_peer_ack_point = cum_ack;
5190 /* C2. try to further move advancedPeerAckPoint ahead */
5191 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5192 struct sctp_tmit_chunk *lchk;
5193 uint32_t old_adv_peer_ack_point;
5195 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5196 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5197 /* C3. See if we need to send a Fwd-TSN */
5198 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5200 * ISSUE with ECN, see FWD-TSN processing.
5202 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5203 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5204 0xee, cum_ack, asoc->advanced_peer_ack_point,
5205 old_adv_peer_ack_point);
5207 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5208 send_forward_tsn(stcb, asoc);
5210 /* try to FR fwd-tsn's that get lost too */
5211 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5212 send_forward_tsn(stcb, asoc);
5216 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5217 if (lchk->whoTo != NULL) {
5222 /* Assure a timer is up */
5223 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5224 stcb->sctp_ep, stcb, lchk->whoTo);
5227 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5228 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5230 stcb->asoc.peers_rwnd,
5231 stcb->asoc.total_flight,
5232 stcb->asoc.total_output_queue_size);
/*
 * sctp_update_acked() - treat the cumulative TSN carried in a SHUTDOWN
 * chunk as an implicit SACK.
 *
 * A SHUTDOWN acknowledges data via its cumulative_tsn_ack field but
 * carries no window advertisement, so we synthesize an a_rwnd value
 * (current peers_rwnd plus the bytes in flight) chosen so the express
 * SACK handler's rwnd recomputation leaves peers_rwnd unchanged.
 * *abort_flag is an output flag set by the callee on fatal errors.
 */
5237 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5240 uint32_t cum_ack, a_rwnd;
/* Cumulative TSN arrives in network byte order. */
5242 cum_ack = ntohl(cp->cumulative_tsn_ack);
5243 /* Arrange so a_rwnd does NOT change */
5244 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5246 /* Now call the express sack handling */
5247 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * sctp_kick_prsctp_reorder_queue() - after a FORWARD-TSN (PR-SCTP) has
 * advanced the cumulative TSN, flush the ordered re-order queue of one
 * inbound stream: first deliver every queued message whose MID is at or
 * below last_mid_delivered, then resume normal in-order delivery for
 * any messages that have become consecutive.  The delivery helpers are
 * invoked with SCTP_READ_LOCK_HELD, so the caller is expected to hold
 * the INP read lock.
 */
5251 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5252 struct sctp_stream_in *strmin)
5254 struct sctp_queued_to_read *control, *ncontrol;
5255 struct sctp_association *asoc;
5257 int need_reasm_check = 0;
5260 mid = strmin->last_mid_delivered;
5262 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with MID <= last_mid_delivered is now deliverable. */
5265 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5266 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5267 /* this is deliverable now */
5268 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unfragmented message: unlink it from whichever stream queue holds it. */
5269 if (control->on_strm_q) {
5270 if (control->on_strm_q == SCTP_ON_ORDERED) {
5271 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5272 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5273 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5276 panic("strmin: %p ctl: %p unknown %d",
5277 strmin, control, control->on_strm_q);
5280 control->on_strm_q = 0;
5282 /* subtract pending on streams */
/* Clamp at zero rather than underflowing the unsigned byte count. */
5283 if (asoc->size_on_all_streams >= control->length) {
5284 asoc->size_on_all_streams -= control->length;
5287 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5289 asoc->size_on_all_streams = 0;
5292 sctp_ucount_decr(asoc->cnt_on_all_streams);
5293 /* deliver it to at least the delivery-q */
5294 if (stcb->sctp_socket) {
5295 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5296 sctp_add_to_readq(stcb->sctp_ep, stcb,
5298 &stcb->sctp_socket->so_rcv,
5299 1, SCTP_READ_LOCK_HELD,
5300 SCTP_SO_NOT_LOCKED);
5303 /* Its a fragmented message */
5304 if (control->first_frag_seen) {
5306 * Make it so this is next to
5307 * deliver, we restore later
5309 strmin->last_mid_delivered = control->mid - 1;
5310 need_reasm_check = 1;
5315 /* no more delivery now. */
/*
 * A partially reassembled message was parked at the head: run the
 * reassembly/delivery check, then restore last_mid_delivered unless
 * delivery already moved it ahead of our saved position.
 */
5319 if (need_reasm_check) {
5322 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5323 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5324 /* Restore the next to deliver unless we are ahead */
5325 strmin->last_mid_delivered = mid;
5328 /* Left the front Partial one on */
5331 need_reasm_check = 0;
5334 * now we must deliver things in queue the normal way if any are
/* Pass 2: deliver any messages that are now consecutive (MID == expected). */
5337 mid = strmin->last_mid_delivered + 1;
5338 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5339 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5340 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5341 /* this is deliverable now */
5342 if (control->on_strm_q) {
5343 if (control->on_strm_q == SCTP_ON_ORDERED) {
5344 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5345 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5346 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5349 panic("strmin: %p ctl: %p unknown %d",
5350 strmin, control, control->on_strm_q);
5353 control->on_strm_q = 0;
5355 /* subtract pending on streams */
5356 if (asoc->size_on_all_streams >= control->length) {
5357 asoc->size_on_all_streams -= control->length;
5360 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5362 asoc->size_on_all_streams = 0;
5365 sctp_ucount_decr(asoc->cnt_on_all_streams);
5366 /* deliver it to at least the delivery-q */
5367 strmin->last_mid_delivered = control->mid;
5368 if (stcb->sctp_socket) {
5369 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5370 sctp_add_to_readq(stcb->sctp_ep, stcb,
5372 &stcb->sctp_socket->so_rcv, 1,
5373 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance the expected MID and keep scanning for the next in-order one. */
5375 mid = strmin->last_mid_delivered + 1;
5377 /* Its a fragmented message */
5378 if (control->first_frag_seen) {
5380 * Make it so this is next to
5383 strmin->last_mid_delivered = control->mid - 1;
5384 need_reasm_check = 1;
/* Final reassembly check; result intentionally ignored this time. */
5392 if (need_reasm_check) {
5393 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * sctp_flush_reassm_for_str_seq() - discard reassembly state for one
 * queued-to-read entry that a FORWARD-TSN up to 'cumtsn' has made
 * obsolete: free every fragment covered by the new cumulative TSN,
 * fix up the reassembly accounting, and then either recycle the
 * control around any surviving (post-cumtsn, unordered) fragments or
 * unlink and release it entirely.
 */
5398 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5399 struct sctp_association *asoc, struct sctp_stream_in *strm,
5400 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5402 struct sctp_tmit_chunk *chk, *nchk;
5403 int cnt_removed = 0;
5406 * For now large messages held on the stream reasm that are complete
5407 * will be tossed too. We could in theory do more work to spin
5408 * through and stop after dumping one msg aka seeing the start of a
5409 * new msg at the head, and call the delivery function... to see if
5410 * it can be delivered... But for now we just dump everything on the
/*
 * Old-style (non-I-DATA) unordered data beyond the new cumulative TSN
 * is not ours to flush; bail out early.
 */
5413 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
/* Walk the reassembly list, freeing every fragment covered by cumtsn. */
5416 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5417 /* Purge hanging chunks */
5418 if (!asoc->idata_supported && (ordered == 0)) {
5419 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5424 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* Clamp at zero rather than underflowing the unsigned byte count. */
5425 if (asoc->size_on_reasm_queue >= chk->send_size) {
5426 asoc->size_on_reasm_queue -= chk->send_size;
5429 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5431 asoc->size_on_reasm_queue = 0;
5434 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5436 sctp_m_freem(chk->data);
5439 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * Fragments newer than cumtsn survived: reset the control and rebuild
 * it around the remaining data, starting a fresh message if the head
 * fragment is a FIRST_FRAG, then retry delivery.
 */
5441 if (!TAILQ_EMPTY(&control->reasm)) {
5442 /* This has to be old data, unordered */
5443 if (control->data) {
5444 sctp_m_freem(control->data);
5445 control->data = NULL;
5447 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5448 chk = TAILQ_FIRST(&control->reasm);
5449 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5450 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5451 sctp_add_chk_to_control(control, strm, stcb, asoc,
5452 chk, SCTP_READ_LOCK_HELD);
5454 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Everything was purged: unlink the control from its stream queue. */
5457 if (control->on_strm_q == SCTP_ON_ORDERED) {
5458 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5459 if (asoc->size_on_all_streams >= control->length) {
5460 asoc->size_on_all_streams -= control->length;
5463 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5465 asoc->size_on_all_streams = 0;
5468 sctp_ucount_decr(asoc->cnt_on_all_streams);
5469 control->on_strm_q = 0;
5470 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5471 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5472 control->on_strm_q = 0;
5474 } else if (control->on_strm_q) {
5475 panic("strm: %p ctl: %p unknown %d",
5476 strm, control, control->on_strm_q);
5479 control->on_strm_q = 0;
/* Only free the control when the read queue no longer references it. */
5480 if (control->on_read_q == 0) {
5481 sctp_free_remote_addr(control->whoFrom);
5482 if (control->data) {
5483 sctp_m_freem(control->data);
5484 control->data = NULL;
5486 sctp_free_a_readq(stcb, control);
5491 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5492 struct sctp_forward_tsn_chunk *fwd,
5493 int *abort_flag, struct mbuf *m, int offset)
5495 /* The pr-sctp fwd tsn */
5497 * here we will perform all the data receiver side steps for
5498 * processing FwdTSN, as required in by pr-sctp draft:
5500 * Assume we get FwdTSN(x):
5502 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5503 * + others we have 3) examine and update re-ordering queue on
5504 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5505 * report where we are.
5507 struct sctp_association *asoc;
5508 uint32_t new_cum_tsn, gap;
5509 unsigned int i, fwd_sz, m_size;
5511 struct sctp_stream_in *strm;
5512 struct sctp_queued_to_read *control, *sv;
5515 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5516 SCTPDBG(SCTP_DEBUG_INDATA1,
5517 "Bad size too small/big fwd-tsn\n");
5520 m_size = (stcb->asoc.mapping_array_size << 3);
5521 /*************************************************************/
5522 /* 1. Here we update local cumTSN and shift the bitmap array */
5523 /*************************************************************/
5524 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5526 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5527 /* Already got there ... */
5531 * now we know the new TSN is more advanced, let's find the actual
5534 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5535 asoc->cumulative_tsn = new_cum_tsn;
5536 if (gap >= m_size) {
5537 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5538 struct mbuf *op_err;
5539 char msg[SCTP_DIAG_INFO_LEN];
5542 * out of range (of single byte chunks in the rwnd I
5543 * give out). This must be an attacker.
5546 SCTP_SNPRINTF(msg, sizeof(msg),
5547 "New cum ack %8.8x too high, highest TSN %8.8x",
5548 new_cum_tsn, asoc->highest_tsn_inside_map);
5549 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5550 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5551 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5554 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5556 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5557 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5558 asoc->highest_tsn_inside_map = new_cum_tsn;
5560 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5561 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5564 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5567 SCTP_TCB_LOCK_ASSERT(stcb);
5568 for (i = 0; i <= gap; i++) {
5569 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5570 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5571 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5572 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5573 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5578 /*************************************************************/
5579 /* 2. Clear up re-assembly queue */
5580 /*************************************************************/
5582 /* This is now done as part of clearing up the stream/seq */
5583 if (asoc->idata_supported == 0) {
5586 /* Flush all the un-ordered data based on cum-tsn */
5587 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5588 for (sid = 0; sid < asoc->streamincnt; sid++) {
5589 strm = &asoc->strmin[sid];
5590 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5591 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5594 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5596 /*******************************************************/
5597 /* 3. Update the PR-stream re-ordering queues and fix */
5598 /* delivery issues as needed. */
5599 /*******************************************************/
5600 fwd_sz -= sizeof(*fwd);
5603 unsigned int num_str;
5606 uint16_t ordered, flags;
5607 struct sctp_strseq *stseq, strseqbuf;
5608 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5610 offset += sizeof(*fwd);
5612 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5613 if (asoc->idata_supported) {
5614 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5616 num_str = fwd_sz / sizeof(struct sctp_strseq);
5618 for (i = 0; i < num_str; i++) {
5619 if (asoc->idata_supported) {
5620 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5621 sizeof(struct sctp_strseq_mid),
5622 (uint8_t *)&strseqbuf_m);
5623 offset += sizeof(struct sctp_strseq_mid);
5624 if (stseq_m == NULL) {
5627 sid = ntohs(stseq_m->sid);
5628 mid = ntohl(stseq_m->mid);
5629 flags = ntohs(stseq_m->flags);
5630 if (flags & PR_SCTP_UNORDERED_FLAG) {
5636 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5637 sizeof(struct sctp_strseq),
5638 (uint8_t *)&strseqbuf);
5639 offset += sizeof(struct sctp_strseq);
5640 if (stseq == NULL) {
5643 sid = ntohs(stseq->sid);
5644 mid = (uint32_t)ntohs(stseq->ssn);
5652 * Ok we now look for the stream/seq on the read
5653 * queue where its not all delivered. If we find it
5654 * we transmute the read entry into a PDI_ABORTED.
5656 if (sid >= asoc->streamincnt) {
5657 /* screwed up streams, stop! */
5660 if ((asoc->str_of_pdapi == sid) &&
5661 (asoc->ssn_of_pdapi == mid)) {
5663 * If this is the one we were partially
5664 * delivering now then we no longer are.
5665 * Note this will change with the reassembly
5668 asoc->fragmented_delivery_inprogress = 0;
5670 strm = &asoc->strmin[sid];
5672 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
5673 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5674 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5678 if (asoc->idata_supported) {
5679 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
5680 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5681 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5685 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5686 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5690 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5691 if ((control->sinfo_stream == sid) &&
5692 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5693 str_seq = (sid << 16) | (0x0000ffff & mid);
5694 control->pdapi_aborted = 1;
5695 sv = stcb->asoc.control_pdapi;
5696 control->end_added = 1;
5697 if (control->on_strm_q == SCTP_ON_ORDERED) {
5698 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5699 if (asoc->size_on_all_streams >= control->length) {
5700 asoc->size_on_all_streams -= control->length;
5703 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5705 asoc->size_on_all_streams = 0;
5708 sctp_ucount_decr(asoc->cnt_on_all_streams);
5709 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5710 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5712 } else if (control->on_strm_q) {
5713 panic("strm: %p ctl: %p unknown %d",
5714 strm, control, control->on_strm_q);
5717 control->on_strm_q = 0;
5718 stcb->asoc.control_pdapi = control;
5719 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5721 SCTP_PARTIAL_DELIVERY_ABORTED,
5723 SCTP_SO_NOT_LOCKED);
5724 stcb->asoc.control_pdapi = sv;
5726 } else if ((control->sinfo_stream == sid) &&
5727 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5728 /* We are past our victim SSN */
5732 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5733 /* Update the sequence number */
5734 strm->last_mid_delivered = mid;
5736 /* now kick the stream the new way */
5737 /* sa_ignore NO_NULL_CHK */
5738 sctp_kick_prsctp_reorder_queue(stcb, strm);
5740 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5743 * Now slide thing forward.
5745 sctp_slide_mapping_arrays(stcb);