2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a sack into the chunk queue (if I have data to
58 * send, that is) and will be sending it for bundling.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: merges the data of a reassembly chunk onto an
 * existing read-queue control entry (definition appears later in this
 * file).  lock_held indicates whether the read lock is already owned.
 */
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int lock_held);
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
78 /* Calculate what the rwnd would be */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone as put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
90 if (stcb->sctp_socket == NULL) {
93 if (stcb->asoc.sb_cc == 0 &&
94 asoc->size_on_reasm_queue == 0 &&
95 asoc->size_on_all_streams == 0) {
96 /* Full rwnd granted */
97 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
100 /* get actual space */
101 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
103 * take out what has NOT been put on socket queue and we yet hold
106 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
107 asoc->cnt_on_reasm_queue * MSIZE));
108 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
109 asoc->cnt_on_all_streams * MSIZE));
114 /* what is the overhead of all these rwnd's */
115 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
117 * If the window gets too small due to ctrl-stuff, reduce it to 1,
118 * even it is 0. SWS engaged
120 if (calc < stcb->asoc.my_rwnd_control_len) {
129 * Build out our readq entry based on the incoming packet.
131 struct sctp_queued_to_read *
132 sctp_build_readq_entry(struct sctp_tcb *stcb,
133 struct sctp_nets *net,
134 uint32_t tsn, uint32_t ppid,
135 uint32_t context, uint16_t sid,
136 uint32_t mid, uint8_t flags,
139 struct sctp_queued_to_read *read_queue_e = NULL;
141 sctp_alloc_a_readq(stcb, read_queue_e);
142 if (read_queue_e == NULL) {
145 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
146 read_queue_e->sinfo_stream = sid;
147 read_queue_e->sinfo_flags = (flags << 8);
148 read_queue_e->sinfo_ppid = ppid;
149 read_queue_e->sinfo_context = context;
150 read_queue_e->sinfo_tsn = tsn;
151 read_queue_e->sinfo_cumtsn = tsn;
152 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
153 read_queue_e->mid = mid;
154 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
155 TAILQ_INIT(&read_queue_e->reasm);
156 read_queue_e->whoFrom = net;
157 atomic_add_int(&net->ref_count, 1);
158 read_queue_e->data = dm;
159 read_queue_e->stcb = stcb;
160 read_queue_e->port_from = stcb->rport;
162 return (read_queue_e);
/*
 * Build the ancillary-data (cmsg) mbuf that accompanies a received
 * message: optionally SCTP_RCVINFO, SCTP_NXTINFO and SCTP_SNDRCV /
 * SCTP_EXTRCV blocks, depending on which socket features are enabled.
 * NOTE(review): this chunk of the file is a lossy extraction — stray
 * original line numbers are embedded and several structural lines are
 * missing; comments below document intent only.
 */
166 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
168 struct sctp_extrcvinfo *seinfo;
169 struct sctp_sndrcvinfo *outinfo;
170 struct sctp_rcvinfo *rcvinfo;
171 struct sctp_nxtinfo *nxtinfo;
/* Fast exit when no ancillary data was requested by the user. */
178 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
179 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
180 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
181 /* user does not want any ancillary data */
/* First pass: compute the total CMSG_SPACE() needed. */
185 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
186 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
188 seinfo = (struct sctp_extrcvinfo *)sinfo;
/* NXTINFO is only provided when a next message is actually known. */
189 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
190 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
192 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
196 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
197 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
199 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
202 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/* Allocate one mbuf big enough for all requested cmsgs. */
208 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
213 SCTP_BUF_LEN(ret) = 0;
215 /* We need a CMSG header followed by the struct */
216 cmh = mtod(ret, struct cmsghdr *);
218 * Make sure that there is no un-initialized padding between the
219 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: fill each cmsg and advance cmh by CMSG_SPACE(). */
222 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
223 cmh->cmsg_level = IPPROTO_SCTP;
224 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
225 cmh->cmsg_type = SCTP_RCVINFO;
226 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
227 rcvinfo->rcv_sid = sinfo->sinfo_stream;
228 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
229 rcvinfo->rcv_flags = sinfo->sinfo_flags;
230 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
231 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
232 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
233 rcvinfo->rcv_context = sinfo->sinfo_context;
234 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
235 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
236 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO block: describes the next queued message. */
239 cmh->cmsg_level = IPPROTO_SCTP;
240 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
241 cmh->cmsg_type = SCTP_NXTINFO;
242 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
243 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
244 nxtinfo->nxt_flags = 0;
245 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
246 nxtinfo->nxt_flags |= SCTP_UNORDERED;
248 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
249 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
251 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
252 nxtinfo->nxt_flags |= SCTP_COMPLETE;
254 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
255 nxtinfo->nxt_length = seinfo->serinfo_next_length;
256 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
257 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
258 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy SCTP_SNDRCV / SCTP_EXTRCV block (deprecated API). */
260 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
261 cmh->cmsg_level = IPPROTO_SCTP;
262 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
264 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
265 cmh->cmsg_type = SCTP_EXTRCV;
266 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
267 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
269 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
270 cmh->cmsg_type = SCTP_SNDRCV;
272 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
280 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
282 uint32_t gap, i, cumackp1;
284 int in_r = 0, in_nr = 0;
286 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
289 cumackp1 = asoc->cumulative_tsn + 1;
290 if (SCTP_TSN_GT(cumackp1, tsn)) {
292 * this tsn is behind the cum ack and thus we don't need to
293 * worry about it being moved from one to the other.
297 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
298 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
299 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
300 if ((in_r == 0) && (in_nr == 0)) {
302 panic("Things are really messed up now");
304 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
305 sctp_print_mapping_array(asoc);
309 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
311 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
312 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
313 asoc->highest_tsn_inside_nr_map = tsn;
315 if (tsn == asoc->highest_tsn_inside_map) {
316 /* We must back down to see what the new highest is */
317 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
318 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
319 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
320 asoc->highest_tsn_inside_map = i;
326 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
332 sctp_place_control_in_stream(struct sctp_stream_in *strm,
333 struct sctp_association *asoc,
334 struct sctp_queued_to_read *control)
336 struct sctp_queued_to_read *at;
337 struct sctp_readhead *q;
338 uint8_t flags, unordered;
340 flags = (control->sinfo_flags >> 8);
341 unordered = flags & SCTP_DATA_UNORDERED;
343 q = &strm->uno_inqueue;
344 if (asoc->idata_supported == 0) {
345 if (!TAILQ_EMPTY(q)) {
347 * Only one stream can be here in old style
352 TAILQ_INSERT_TAIL(q, control, next_instrm);
353 control->on_strm_q = SCTP_ON_UNORDERED;
359 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
360 control->end_added = 1;
361 control->first_frag_seen = 1;
362 control->last_frag_seen = 1;
364 if (TAILQ_EMPTY(q)) {
366 TAILQ_INSERT_HEAD(q, control, next_instrm);
368 control->on_strm_q = SCTP_ON_UNORDERED;
370 control->on_strm_q = SCTP_ON_ORDERED;
374 TAILQ_FOREACH(at, q, next_instrm) {
375 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
377 * one in queue is bigger than the new one,
378 * insert before this one
380 TAILQ_INSERT_BEFORE(at, control, next_instrm);
382 control->on_strm_q = SCTP_ON_UNORDERED;
384 control->on_strm_q = SCTP_ON_ORDERED;
387 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
389 * Gak, He sent me a duplicate msg id
390 * number?? return -1 to abort.
394 if (TAILQ_NEXT(at, next_instrm) == NULL) {
396 * We are at the end, insert it
399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
400 sctp_log_strm_del(control, at,
401 SCTP_STR_LOG_FROM_INSERT_TL);
403 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
405 control->on_strm_q = SCTP_ON_UNORDERED;
407 control->on_strm_q = SCTP_ON_ORDERED;
418 sctp_abort_in_reasm(struct sctp_tcb *stcb,
419 struct sctp_queued_to_read *control,
420 struct sctp_tmit_chunk *chk,
421 int *abort_flag, int opspot)
423 char msg[SCTP_DIAG_INFO_LEN];
426 if (stcb->asoc.idata_supported) {
427 snprintf(msg, sizeof(msg),
428 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
430 control->fsn_included,
433 chk->rec.data.fsn, chk->rec.data.mid);
435 snprintf(msg, sizeof(msg),
436 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
438 control->fsn_included,
442 (uint16_t)chk->rec.data.mid);
444 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
445 sctp_m_freem(chk->data);
447 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
448 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
449 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
454 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
457 * The control could not be placed and must be cleaned.
459 struct sctp_tmit_chunk *chk, *nchk;
461 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
462 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
464 sctp_m_freem(chk->data);
466 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
468 sctp_free_a_readq(stcb, control);
472 * Queue the chunk either right into the socket buffer if it is the next one
473 * to go OR put it in the correct place in the delivery queue. If we do
474 * append to the so_buf, keep doing so until we are out of order as
475 * long as the control's entered are non-fragmented.
/*
 * Deliver an in-order control straight to the socket buffer, drain any
 * now-deliverable successors, or insert it into the stream's sorted
 * queue for later.  Aborts the association on duplicate / backward MIDs.
 * NOTE(review): this chunk of the file is a lossy extraction — stray
 * original line numbers are embedded and several structural lines are
 * missing; comments below document intent only.
 */
478 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
479 struct sctp_association *asoc,
480 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
483 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
484 * all the data in one stream this could happen quite rapidly. One
485 * could use the TSN to keep track of things, but this scheme breaks
486 * down in the other type of stream usage that could occur. Send a
487 * single msg to stream 0, send 4Billion messages to stream 1, now
488 * send a message to stream 0. You have a situation where the TSN
489 * has wrapped but not in the stream. Is this worth worrying about
490 * or should we just change our queue sort at the bottom to be by
493 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
494 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
495 * assignment this could happen... and I don't see how this would be
496 * a violation. So for now I am undecided an will leave the sort by
497 * SSN alone. Maybe a hybred approach is the answer
500 struct sctp_queued_to_read *at;
504 struct sctp_stream_in *strm;
505 char msg[SCTP_DIAG_INFO_LEN];
507 strm = &asoc->strmin[control->sinfo_stream];
508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
509 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* A MID behind last_mid_delivered is a protocol violation: abort. */
511 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
512 /* The incoming sseq is behind where we last delivered? */
513 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
514 strm->last_mid_delivered, control->mid);
516 * throw it in the stream so it gets cleaned up in
517 * association destruction
519 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
520 if (asoc->idata_supported) {
521 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
522 strm->last_mid_delivered, control->sinfo_tsn,
523 control->sinfo_stream, control->mid);
525 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
526 (uint16_t)strm->last_mid_delivered,
528 control->sinfo_stream,
529 (uint16_t)control->mid);
531 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
532 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
533 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Account the message on the stream queues before delivery check. */
539 asoc->size_on_all_streams += control->length;
540 sctp_ucount_incr(asoc->cnt_on_all_streams);
541 nxt_todel = strm->last_mid_delivered + 1;
542 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
543 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock-order dance: drop TCB lock before taking socket lock. */
546 so = SCTP_INP_SO(stcb->sctp_ep);
547 atomic_add_int(&stcb->asoc.refcnt, 1);
548 SCTP_TCB_UNLOCK(stcb);
549 SCTP_SOCKET_LOCK(so, 1);
551 atomic_subtract_int(&stcb->asoc.refcnt, 1);
552 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
553 SCTP_SOCKET_UNLOCK(so, 1);
557 /* can be delivered right away? */
558 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
559 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
561 /* EY it wont be queued if it could be delivered directly */
563 asoc->size_on_all_streams -= control->length;
564 sctp_ucount_decr(asoc->cnt_on_all_streams);
565 strm->last_mid_delivered++;
566 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
567 sctp_add_to_readq(stcb->sctp_ep, stcb,
569 &stcb->sctp_socket->so_rcv, 1,
570 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain any queued successors that are now in order and complete. */
571 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
573 nxt_todel = strm->last_mid_delivered + 1;
574 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
575 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
576 asoc->size_on_all_streams -= control->length;
577 sctp_ucount_decr(asoc->cnt_on_all_streams);
578 if (control->on_strm_q == SCTP_ON_ORDERED) {
579 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
582 panic("Huh control: %p is on_strm_q: %d",
583 control, control->on_strm_q);
586 control->on_strm_q = 0;
587 strm->last_mid_delivered++;
589 * We ignore the return of deliver_data here
590 * since we always can hold the chunk on the
591 * d-queue. And we have a finite number that
592 * can be delivered from the strq.
594 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
595 sctp_log_strm_del(control, NULL,
596 SCTP_STR_LOG_FROM_IMMED_DEL);
598 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
599 sctp_add_to_readq(stcb->sctp_ep, stcb,
601 &stcb->sctp_socket->so_rcv, 1,
602 SCTP_READ_LOCK_NOT_HELD,
605 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
610 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
611 SCTP_SOCKET_UNLOCK(so, 1);
616 * Ok, we did not deliver this guy, find the correct place
617 * to put it on the queue.
619 if (sctp_place_control_in_stream(strm, asoc, control)) {
/* Duplicate MID detected while inserting: clean up and abort. */
620 snprintf(msg, sizeof(msg),
621 "Queue to str MID: %u duplicate",
623 sctp_clean_up_control(stcb, control);
624 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
625 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
626 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
634 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
636 struct mbuf *m, *prev = NULL;
637 struct sctp_tcb *stcb;
639 stcb = control->stcb;
640 control->held_length = 0;
644 if (SCTP_BUF_LEN(m) == 0) {
645 /* Skip mbufs with NO length */
648 control->data = sctp_m_free(m);
651 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
652 m = SCTP_BUF_NEXT(prev);
655 control->tail_mbuf = prev;
660 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
661 if (control->on_read_q) {
663 * On read queue so we must increment the SB stuff,
664 * we assume caller has done any locks of SB.
666 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
668 m = SCTP_BUF_NEXT(m);
671 control->tail_mbuf = prev;
676 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
678 struct mbuf *prev = NULL;
679 struct sctp_tcb *stcb;
681 stcb = control->stcb;
684 panic("Control broken");
689 if (control->tail_mbuf == NULL) {
692 sctp_setup_tail_pointer(control);
695 control->tail_mbuf->m_next = m;
697 if (SCTP_BUF_LEN(m) == 0) {
698 /* Skip mbufs with NO length */
701 control->tail_mbuf->m_next = sctp_m_free(m);
702 m = control->tail_mbuf->m_next;
704 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
705 m = SCTP_BUF_NEXT(prev);
708 control->tail_mbuf = prev;
713 if (control->on_read_q) {
715 * On read queue so we must increment the SB stuff,
716 * we assume caller has done any locks of SB.
718 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
720 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
721 m = SCTP_BUF_NEXT(m);
724 control->tail_mbuf = prev;
729 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
731 memset(nc, 0, sizeof(struct sctp_queued_to_read));
732 nc->sinfo_stream = control->sinfo_stream;
733 nc->mid = control->mid;
734 TAILQ_INIT(&nc->reasm);
735 nc->top_fsn = control->top_fsn;
736 nc->mid = control->mid;
737 nc->sinfo_flags = control->sinfo_flags;
738 nc->sinfo_ppid = control->sinfo_ppid;
739 nc->sinfo_context = control->sinfo_context;
740 nc->fsn_included = 0xffffffff;
741 nc->sinfo_tsn = control->sinfo_tsn;
742 nc->sinfo_cumtsn = control->sinfo_cumtsn;
743 nc->sinfo_assoc_id = control->sinfo_assoc_id;
744 nc->whoFrom = control->whoFrom;
745 atomic_add_int(&nc->whoFrom->ref_count, 1);
746 nc->stcb = control->stcb;
747 nc->port_from = control->port_from;
751 sctp_reset_a_control(struct sctp_queued_to_read *control,
752 struct sctp_inpcb *inp, uint32_t tsn)
754 control->fsn_included = tsn;
755 if (control->on_read_q) {
757 * We have to purge it from there, hopefully this will work
760 TAILQ_REMOVE(&inp->read_queue, control, next);
761 control->on_read_q = 0;
/*
 * Old-style (pre I-DATA) unordered delivery: every chunk lands on MID 0,
 * so fragments are collapsed by consecutive FSN, complete messages are
 * pushed to the read queue, and any leftover fragments are moved onto a
 * fresh control entry.  Returns nonzero to stop scanning the unordered
 * queue.  NOTE(review): this chunk of the file is a lossy extraction —
 * stray original line numbers are embedded and several structural lines
 * are missing; comments below document intent only.
 */
766 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
767 struct sctp_association *asoc,
768 struct sctp_stream_in *strm,
769 struct sctp_queued_to_read *control,
771 int inp_read_lock_held)
774 * Special handling for the old un-ordered data chunk. All the
775 * chunks/TSN's go to mid 0. So we have to do the old style watching
776 * to see if we have it all. If you return one, no other control
777 * entries on the un-ordered queue will be looked at. In theory
778 * there should be no others entries in reality, unless the guy is
779 * sending both unordered NDATA and unordered DATA...
781 struct sctp_tmit_chunk *chk, *lchk, *tchk;
783 struct sctp_queued_to_read *nc;
786 if (control->first_frag_seen == 0) {
787 /* Nothing we can do, we have not seen the first piece yet */
790 /* Collapse any we can */
/* fsn is the next fragment number we can merge in order. */
793 fsn = control->fsn_included + 1;
794 /* Now what can we add? */
795 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
796 if (chk->rec.data.fsn == fsn) {
798 sctp_alloc_a_readq(stcb, nc);
802 memset(nc, 0, sizeof(struct sctp_queued_to_read));
803 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
804 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
808 if (control->end_added) {
810 if (!TAILQ_EMPTY(&control->reasm)) {
812 * Ok we have to move anything left
813 * on the control queue to a new
/* Leftovers belong to the *next* message: seed nc from control. */
816 sctp_build_readq_entry_from_ctl(nc, control);
817 tchk = TAILQ_FIRST(&control->reasm);
818 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
819 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
820 asoc->size_on_reasm_queue -= tchk->send_size;
821 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
822 nc->first_frag_seen = 1;
823 nc->fsn_included = tchk->rec.data.fsn;
824 nc->data = tchk->data;
825 nc->sinfo_ppid = tchk->rec.data.ppid;
826 nc->sinfo_tsn = tchk->rec.data.tsn;
827 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
829 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
830 sctp_setup_tail_pointer(nc);
831 tchk = TAILQ_FIRST(&control->reasm);
833 /* Spin the rest onto the queue */
835 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
836 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
837 tchk = TAILQ_FIRST(&control->reasm);
840 * Now lets add it to the queue
841 * after removing control
843 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
844 nc->on_strm_q = SCTP_ON_UNORDERED;
845 if (control->on_strm_q) {
846 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
847 control->on_strm_q = 0;
/* Completed message: release any PD-API state it held. */
850 if (control->pdapi_started) {
851 strm->pd_api_started = 0;
852 control->pdapi_started = 0;
854 if (control->on_strm_q) {
855 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
856 control->on_strm_q = 0;
857 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
859 if (control->on_read_q == 0) {
860 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
861 &stcb->sctp_socket->so_rcv, control->end_added,
862 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
864 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
865 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
867 * Switch to the new guy and
/* Unused spare entry: give it back to the allocator. */
873 if (nc->on_strm_q == 0) {
874 sctp_free_a_readq(stcb, nc);
879 sctp_free_a_readq(stcb, nc);
/* Partial message exceeding the PD-API point: start delivery early. */
886 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
887 strm->pd_api_started = 1;
888 control->pdapi_started = 1;
889 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
890 &stcb->sctp_socket->so_rcv, control->end_added,
891 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
892 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Place an old-style unordered fragment into the control entry: either
 * it becomes (or replaces) the first fragment, or it is inserted into
 * the reassembly list sorted by FSN.  Duplicate FSNs or PD-API
 * conflicts abort the association.  NOTE(review): lossy extraction —
 * embedded line numbers and missing structural lines; comments
 * document intent only.
 */
900 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
901 struct sctp_association *asoc,
902 struct sctp_queued_to_read *control,
903 struct sctp_tmit_chunk *chk,
906 struct sctp_tmit_chunk *at;
910 * Here we need to place the chunk into the control structure sorted
911 * in the correct order.
913 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
914 /* Its the very first one. */
915 SCTPDBG(SCTP_DEBUG_XXX,
916 "chunk is a first fsn: %u becomes fsn_included\n",
918 if (control->first_frag_seen) {
920 * In old un-ordered we can reassembly on one
921 * control multiple messages. As long as the next
922 * FIRST is greater then the old first (TSN i.e. FSN
928 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
930 * Easy way the start of a new guy beyond
935 if ((chk->rec.data.fsn == control->fsn_included) ||
936 (control->pdapi_started)) {
938 * Ok this should not happen, if it does we
939 * started the pd-api on the higher TSN
940 * (since the equals part is a TSN failure
943 * We are completly hosed in that case since
944 * I have no way to recover. This really
945 * will only happen if we can get more TSN's
946 * higher before the pd-api-point.
948 sctp_abort_in_reasm(stcb, control, chk,
950 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
955 * Ok we have two firsts and the one we just got is
956 * smaller than the one we previously placed.. yuck!
957 * We must swap them out.
/* Swap data, lengths, FSN, TSN and PPID between control and chk. */
960 tdata = control->data;
961 control->data = chk->data;
963 /* Save the lengths */
964 chk->send_size = control->length;
965 /* Recompute length of control and tail pointer */
966 sctp_setup_tail_pointer(control);
967 /* Fix the FSN included */
968 tmp = control->fsn_included;
969 control->fsn_included = chk->rec.data.fsn;
970 chk->rec.data.fsn = tmp;
971 /* Fix the TSN included */
972 tmp = control->sinfo_tsn;
973 control->sinfo_tsn = chk->rec.data.tsn;
974 chk->rec.data.tsn = tmp;
975 /* Fix the PPID included */
976 tmp = control->sinfo_ppid;
977 control->sinfo_ppid = chk->rec.data.ppid;
978 chk->rec.data.ppid = tmp;
979 /* Fix tail pointer */
/* First fragment ever seen: adopt the chunk's data wholesale. */
982 control->first_frag_seen = 1;
983 control->fsn_included = chk->rec.data.fsn;
984 control->top_fsn = chk->rec.data.fsn;
985 control->sinfo_tsn = chk->rec.data.tsn;
986 control->sinfo_ppid = chk->rec.data.ppid;
987 control->data = chk->data;
988 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
990 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
991 sctp_setup_tail_pointer(control);
/* Non-first fragment: insert into the reasm list sorted by FSN. */
996 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
997 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
999 * This one in queue is bigger than the new one,
1000 * insert the new one before at.
1002 asoc->size_on_reasm_queue += chk->send_size;
1003 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1005 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1007 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1009 * They sent a duplicate fsn number. This really
1010 * should not happen since the FSN is a TSN and it
1011 * should have been dropped earlier.
1013 sctp_abort_in_reasm(stcb, control, chk,
1015 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1019 if (inserted == 0) {
1020 /* Its at the end */
1021 asoc->size_on_reasm_queue += chk->send_size;
1022 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1023 control->top_fsn = chk->rec.data.fsn;
1024 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * Scan a stream's unordered and ordered queues for reassembled
 * messages that are ready for the application: move completed entries
 * to the endpoint read queue and start PD-API (partial delivery) for
 * large incomplete ones.  NOTE(review): lossy extraction — embedded
 * line numbers and missing structural lines; comments document
 * intent only.
 */
1029 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1030 struct sctp_stream_in *strm, int inp_read_lock_held)
1033 * Given a stream, strm, see if any of the SSN's on it that are
1034 * fragmented are ready to deliver. If so go ahead and place them on
1035 * the read queue. In so placing if we have hit the end, then we
1036 * need to remove them from the stream's queue.
1038 struct sctp_queued_to_read *control, *nctl = NULL;
1039 uint32_t next_to_del;
/* pd_point: message size above which partial delivery kicks in. */
1043 if (stcb->sctp_socket) {
1044 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1045 stcb->sctp_ep->partial_delivery_point);
1047 pd_point = stcb->sctp_ep->partial_delivery_point;
/* Pass 1: the unordered queue. */
1049 control = TAILQ_FIRST(&strm->uno_inqueue);
1051 if ((control != NULL) &&
1052 (asoc->idata_supported == 0)) {
1053 /* Special handling needed for "old" data format */
1054 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1058 if (strm->pd_api_started) {
1059 /* Can't add more */
1063 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1064 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1065 nctl = TAILQ_NEXT(control, next_instrm);
1066 if (control->end_added) {
1067 /* We just put the last bit on */
1068 if (control->on_strm_q) {
1070 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1071 panic("Huh control: %p on_q: %d -- not unordered?",
1072 control, control->on_strm_q);
1075 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1076 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1077 control->on_strm_q = 0;
1079 if (control->on_read_q == 0) {
1080 sctp_add_to_readq(stcb->sctp_ep, stcb,
1082 &stcb->sctp_socket->so_rcv, control->end_added,
1083 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1086 /* Can we do a PD-API for this un-ordered guy? */
1087 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1088 strm->pd_api_started = 1;
1089 control->pdapi_started = 1;
1090 sctp_add_to_readq(stcb->sctp_ep, stcb,
1092 &stcb->sctp_socket->so_rcv, control->end_added,
1093 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
/* Pass 2: the ordered queue, starting with the head entry. */
1101 control = TAILQ_FIRST(&strm->inqueue);
1102 if (strm->pd_api_started) {
1103 /* Can't add more */
1106 if (control == NULL) {
1109 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1111 * Ok the guy at the top was being partially delivered
1112 * completed, so we remove it. Note the pd_api flag was
1113 * taken off when the chunk was merged on in
1114 * sctp_queue_data_for_reasm below.
1116 nctl = TAILQ_NEXT(control, next_instrm);
1117 SCTPDBG(SCTP_DEBUG_XXX,
1118 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1119 control, control->end_added, control->mid,
1120 control->top_fsn, control->fsn_included,
1121 strm->last_mid_delivered);
1122 if (control->end_added) {
1123 if (control->on_strm_q) {
1125 if (control->on_strm_q != SCTP_ON_ORDERED) {
1126 panic("Huh control: %p on_q: %d -- not ordered?",
1127 control, control->on_strm_q);
1130 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1131 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1132 control->on_strm_q = 0;
1134 if (strm->pd_api_started && control->pdapi_started) {
1135 control->pdapi_started = 0;
1136 strm->pd_api_started = 0;
1138 if (control->on_read_q == 0) {
1139 sctp_add_to_readq(stcb->sctp_ep, stcb,
1141 &stcb->sctp_socket->so_rcv, control->end_added,
1142 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1147 if (strm->pd_api_started) {
1149 * Can't add more must have gotten an un-ordered above being
1150 * partially delivered.
/* Deliver successive in-order MIDs while they are ready. */
1155 next_to_del = strm->last_mid_delivered + 1;
1157 SCTPDBG(SCTP_DEBUG_XXX,
1158 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1159 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1161 nctl = TAILQ_NEXT(control, next_instrm);
1162 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1163 (control->first_frag_seen)) {
1166 /* Ok we can deliver it onto the stream. */
1167 if (control->end_added) {
1168 /* We are done with it afterwards */
1169 if (control->on_strm_q) {
1171 if (control->on_strm_q != SCTP_ON_ORDERED) {
1172 panic("Huh control: %p on_q: %d -- not ordered?",
1173 control, control->on_strm_q);
1176 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1177 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1178 control->on_strm_q = 0;
1182 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1184 * A singleton now slipping through - mark
1185 * it non-revokable too
1187 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1188 } else if (control->end_added == 0) {
1190 * Check if we can defer adding until its
1193 if ((control->length < pd_point) || (strm->pd_api_started)) {
1195 * Don't need it or cannot add more
1196 * (one being delivered that way)
1201 done = (control->end_added) && (control->last_frag_seen);
1202 if (control->on_read_q == 0) {
1203 sctp_add_to_readq(stcb->sctp_ep, stcb,
1205 &stcb->sctp_socket->so_rcv, control->end_added,
1206 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1208 strm->last_mid_delivered = next_to_del;
1213 /* We are now doing PD API */
1214 strm->pd_api_started = 1;
1215 control->pdapi_started = 1;
/*
 * NOTE(review): the numeric prefix on each line is the original file's line
 * number, baked in by a lossy extraction; gaps in the numbering mean blank
 * lines, closing braces and some statements were dropped, so this text is
 * not compilable as-is.  Only comments are added below.
 *
 * Merge the data of chunk `chk` onto reassembly entry `control`, then free
 * the chunk.  `hold_rlock` != 0 means the caller already holds the INP read
 * lock; otherwise it is taken here when the control is on the read queue.
 */
1225 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1226 struct sctp_stream_in *strm,
1227 struct sctp_tcb *stcb, struct sctp_association *asoc,
1228 struct sctp_tmit_chunk *chk, int hold_rlock)
1231 * Given a control and a chunk, merge the data from the chk onto the
1232 * control and free up the chunk resources.
1236 if (control->on_read_q && (hold_rlock == 0)) {
1238 * Its being pd-api'd so we must do some locks.
1240 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* Append the chunk's mbufs to the control's data chain. */
1243 if (control->data == NULL) {
1244 control->data = chk->data;
1245 sctp_setup_tail_pointer(control);
1247 sctp_add_to_tail_pointer(control, chk->data);
/* Account: the fragment leaves the reassembly queue and becomes readable. */
1249 control->fsn_included = chk->rec.data.fsn;
1250 asoc->size_on_reasm_queue -= chk->send_size;
1251 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1252 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1254 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1255 control->first_frag_seen = 1;
1256 control->sinfo_tsn = chk->rec.data.tsn;
1257 control->sinfo_ppid = chk->rec.data.ppid;
/*
 * Last fragment: the message is complete.  If it is both on a stream
 * queue and on the read queue, end any PD-API in progress and unlink it
 * from whichever (ordered/unordered) stream queue holds it.
 */
1259 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1261 if ((control->on_strm_q) && (control->on_read_q)) {
1262 if (control->pdapi_started) {
1263 control->pdapi_started = 0;
1264 strm->pd_api_started = 0;
1266 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1268 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1269 control->on_strm_q = 0;
1270 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1272 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1273 control->on_strm_q = 0;
1275 } else if (control->on_strm_q) {
1276 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1277 control->on_strm_q);
1281 control->end_added = 1;
1282 control->last_frag_seen = 1;
1285 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* The chunk's data now belongs to the control; release the chunk itself. */
1287 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1291 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1292 * queue, see if anything can be delivered. If so pull it off (or as much as
1293 * we can). If we run out of space then we must dump what we can and set the
1294 * appropriate flag to say we queued what we could.
/*
 * NOTE(review): original line numbers are embedded as prefixes and are not
 * contiguous -- blank lines, closing braces and `else` lines were lost in
 * extraction, so this text is not compilable as-is.  Only comments are
 * added below.
 *
 * Queue fragment `chk` for message `control` on the per-stream reassembly
 * structures and deliver whatever becomes complete.  `created_control`
 * means `control` was freshly allocated by the caller and still needs to
 * be placed on the stream-in queue; on protocol violations the association
 * is aborted via sctp_abort_in_reasm() and *abort_flag is set there.
 */
1297 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1298 struct sctp_queued_to_read *control,
1299 struct sctp_tmit_chunk *chk,
1300 int created_control,
1301 int *abort_flag, uint32_t tsn)
1304 struct sctp_tmit_chunk *at, *nat;
1305 struct sctp_stream_in *strm;
1306 int do_wakeup, unordered;
1308 strm = &asoc->strmin[control->sinfo_stream];
1310 * For old un-ordered data chunks.
1312 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1317 /* Must be added to the stream-in queue */
1318 if (created_control) {
1319 if (sctp_place_control_in_stream(strm, asoc, control)) {
1320 /* Duplicate SSN? */
1321 sctp_clean_up_control(stcb, control);
1322 sctp_abort_in_reasm(stcb, control, chk,
1324 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
/*
 * A non-I-DATA fragment sitting at cum-ack + 1 must carry the B (first)
 * bit, since everything before it has already been delivered.
 */
1327 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1329 * Ok we created this control and now lets validate
1330 * that its legal i.e. there is a B bit set, if not
1331 * and we have up to the cum-ack then its invalid.
1333 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1334 sctp_abort_in_reasm(stcb, control, chk,
1336 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old-style (pre I-DATA) unordered fragments take a separate path. */
1341 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1342 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1346 * Ok we must queue the chunk into the reasembly portion: o if its
1347 * the first it goes to the control mbuf. o if its not first but the
1348 * next in sequence it goes to the control, and each succeeding one
1349 * in order also goes. o if its not in order we place it on the list
1352 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1353 /* Its the very first one. */
1354 SCTPDBG(SCTP_DEBUG_XXX,
1355 "chunk is a first fsn: %u becomes fsn_included\n",
1357 if (control->first_frag_seen) {
1359 * Error on senders part, they either sent us two
1360 * data chunks with FIRST, or they sent two
1361 * un-ordered chunks that were fragmented at the
1362 * same time in the same stream.
1364 sctp_abort_in_reasm(stcb, control, chk,
1366 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* First fragment seeds the control's data chain and sinfo fields. */
1369 control->first_frag_seen = 1;
1370 control->sinfo_ppid = chk->rec.data.ppid;
1371 control->sinfo_tsn = chk->rec.data.tsn;
1372 control->fsn_included = chk->rec.data.fsn;
1373 control->data = chk->data;
1374 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1376 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1377 sctp_setup_tail_pointer(control);
1379 /* Place the chunk in our list */
/*
 * Middle/last fragment: validate its FSN against what we have already
 * delivered (fsn_included) and against top_fsn once the last fragment
 * has been seen, aborting on impossible sequences.
 */
1382 if (control->last_frag_seen == 0) {
1383 /* Still willing to raise highest FSN seen */
1384 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1385 SCTPDBG(SCTP_DEBUG_XXX,
1386 "We have a new top_fsn: %u\n",
1388 control->top_fsn = chk->rec.data.fsn;
1390 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1391 SCTPDBG(SCTP_DEBUG_XXX,
1392 "The last fsn is now in place fsn: %u\n",
1394 control->last_frag_seen = 1;
1396 if (asoc->idata_supported || control->first_frag_seen) {
1398 * For IDATA we always check since we know
1399 * that the first fragment is 0. For old
1400 * DATA we have to receive the first before
1401 * we know the first FSN (which is the TSN).
1403 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1405 * We have already delivered up to
1408 sctp_abort_in_reasm(stcb, control, chk,
1410 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1415 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1416 /* Second last? huh? */
1417 SCTPDBG(SCTP_DEBUG_XXX,
1418 "Duplicate last fsn: %u (top: %u) -- abort\n",
1419 chk->rec.data.fsn, control->top_fsn);
1420 sctp_abort_in_reasm(stcb, control,
1422 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1425 if (asoc->idata_supported || control->first_frag_seen) {
1427 * For IDATA we always check since we know
1428 * that the first fragment is 0. For old
1429 * DATA we have to receive the first before
1430 * we know the first FSN (which is the TSN).
1433 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1435 * We have already delivered up to
1438 SCTPDBG(SCTP_DEBUG_XXX,
1439 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1440 chk->rec.data.fsn, control->fsn_included);
1441 sctp_abort_in_reasm(stcb, control, chk,
1443 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1448 * validate not beyond top FSN if we have seen last
1451 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1452 SCTPDBG(SCTP_DEBUG_XXX,
1453 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1456 sctp_abort_in_reasm(stcb, control, chk,
1458 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1463 * If we reach here, we need to place the new chunk in the
1464 * reassembly for this control.
1466 SCTPDBG(SCTP_DEBUG_XXX,
1467 "chunk is a not first fsn: %u needs to be inserted\n",
/* Insert into control->reasm, keeping the list sorted by FSN. */
1469 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1470 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1472 * This one in queue is bigger than the new
1473 * one, insert the new one before at.
1475 SCTPDBG(SCTP_DEBUG_XXX,
1476 "Insert it before fsn: %u\n",
1478 asoc->size_on_reasm_queue += chk->send_size;
1479 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1480 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1483 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1485 * Gak, He sent me a duplicate str seq
1489 * foo bar, I guess I will just free this
1490 * new guy, should we abort too? FIX ME
1491 * MAYBE? Or it COULD be that the SSN's have
1492 * wrapped. Maybe I should compare to TSN
1493 * somehow... sigh for now just blow away
1496 SCTPDBG(SCTP_DEBUG_XXX,
1497 "Duplicate to fsn: %u -- abort\n",
1499 sctp_abort_in_reasm(stcb, control,
1501 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1505 if (inserted == 0) {
1506 /* Goes on the end */
1507 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1509 asoc->size_on_reasm_queue += chk->send_size;
1510 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1511 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1515 * Ok lets see if we can suck any up into the control structure that
1516 * are in seq if it makes sense.
1520 * If the first fragment has not been seen there is no sense in
1523 if (control->first_frag_seen) {
1524 next_fsn = control->fsn_included + 1;
/* Drain consecutive FSNs from the reasm list onto the control. */
1525 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1526 if (at->rec.data.fsn == next_fsn) {
1527 /* We can add this one now to the control */
1528 SCTPDBG(SCTP_DEBUG_XXX,
1529 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1532 next_fsn, control->fsn_included);
1533 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1534 sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1535 if (control->on_read_q) {
1539 if (control->end_added && control->pdapi_started) {
1540 if (strm->pd_api_started) {
1541 strm->pd_api_started = 0;
1542 control->pdapi_started = 0;
1544 if (control->on_read_q == 0) {
1545 sctp_add_to_readq(stcb->sctp_ep, stcb,
1547 &stcb->sctp_socket->so_rcv, control->end_added,
1548 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1559 /* Need to wakeup the reader */
1560 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Find the reassembly entry for message id `mid` on stream `strm`.
 *
 * NOTE(review): the `if (ordered)`/`else`, `break` and `return` lines were
 * elided by extraction (line-number prefixes skip them).  From what remains:
 * the ordered inqueue is scanned for a matching MID; for unordered with
 * I-DATA the uno_inqueue is scanned likewise; for unordered old DATA the
 * head of uno_inqueue is taken -- confirm against upstream before editing.
 */
1564 static struct sctp_queued_to_read *
1565 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1567 struct sctp_queued_to_read *control;
1570 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1571 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1576 if (idata_supported) {
1577 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1578 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Old DATA has no MID on unordered messages; only one can be in reassembly. */
1583 control = TAILQ_FIRST(&strm->uno_inqueue);
/*
 * Process one received DATA or I-DATA chunk (`chk_type` selects which).
 * Parses the header at `offset` in mbuf chain *m, filters duplicates and
 * invalid streams, copies or steals the payload, and hands it to express
 * delivery, the ordered stream queue, or the reassembly code.  Returns
 * via *abort_flag / *break_flag / *high_tsn to the caller's chunk loop.
 *
 * NOTE(review): the numeric prefixes are the original file's line numbers;
 * gaps show that blank lines, closing braces, `else` lines, gotos, labels
 * and some declarations were lost in extraction.  This text is therefore
 * not compilable as-is; only comments are added below.
 */
1590 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1591 struct mbuf **m, int offset, int chk_length,
1592 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1593 int *break_flag, int last_chunk, uint8_t chk_type)
1595 /* Process a data chunk */
1596 /* struct sctp_tmit_chunk *chk; */
1597 struct sctp_tmit_chunk *chk;
1598 uint32_t tsn, fsn, gap, mid;
1601 int need_reasm_check = 0;
1603 struct mbuf *op_err;
1604 char msg[SCTP_DIAG_INFO_LEN];
1605 struct sctp_queued_to_read *control = NULL;
1608 struct sctp_stream_reset_list *liste;
1611 int created_control = 0;
/* Parse the chunk header; I-DATA and DATA have different layouts. */
1613 if (chk_type == SCTP_IDATA) {
1614 struct sctp_idata_chunk *chunk, chunk_buf;
1616 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1617 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1618 chk_flags = chunk->ch.chunk_flags;
1619 clen = sizeof(struct sctp_idata_chunk);
1620 tsn = ntohl(chunk->dp.tsn);
1621 sid = ntohs(chunk->dp.sid);
1622 mid = ntohl(chunk->dp.mid);
/* In I-DATA the first fragment carries the PPID; later ones carry the FSN. */
1623 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1625 ppid = chunk->dp.ppid_fsn.ppid;
1627 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1628 ppid = 0xffffffff; /* Use as an invalid value. */
1631 struct sctp_data_chunk *chunk, chunk_buf;
1633 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1634 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1635 chk_flags = chunk->ch.chunk_flags;
1636 clen = sizeof(struct sctp_data_chunk);
1637 tsn = ntohl(chunk->dp.tsn);
1638 sid = ntohs(chunk->dp.sid);
1639 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1641 ppid = chunk->dp.ppid;
/* Chunk with no user data at all is a protocol violation -> ABORT. */
1643 if ((size_t)chk_length == clen) {
1645 * Need to send an abort since we had a empty data chunk.
1647 op_err = sctp_generate_no_user_data_cause(tsn);
1648 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1649 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1653 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1654 asoc->send_sack = 1;
1656 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1657 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1658 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1663 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
/* TSN at or below cum-ack: duplicate; remember it for the next SACK. */
1664 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1665 /* It is a duplicate */
1666 SCTP_STAT_INCR(sctps_recvdupdata);
1667 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1668 /* Record a dup for the next outbound sack */
1669 asoc->dup_tsns[asoc->numduptsns] = tsn;
1672 asoc->send_sack = 1;
1675 /* Calculate the number of TSN's between the base and this TSN */
1676 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1677 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1678 /* Can't hold the bit in the mapping at max array, toss it */
1681 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1682 SCTP_TCB_LOCK_ASSERT(stcb);
1683 if (sctp_expand_mapping_array(asoc, gap)) {
1684 /* Can't expand, drop it */
1688 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1691 /* See if we have received this one already */
1692 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1693 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1694 SCTP_STAT_INCR(sctps_recvdupdata);
1695 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1696 /* Record a dup for the next outbound sack */
1697 asoc->dup_tsns[asoc->numduptsns] = tsn;
1700 asoc->send_sack = 1;
1704 * Check to see about the GONE flag, duplicates would cause a sack
1705 * to be sent up above
1707 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1708 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1709 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1711 * wait a minute, this guy is gone, there is no longer a
1712 * receiver. Send peer an ABORT!
1714 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1715 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1720 * Now before going further we see if there is room. If NOT then we
1721 * MAY let one through only IF this TSN is the one we are waiting
1722 * for on a partial delivery API.
1725 /* Is the stream valid? */
/*
 * Invalid stream id: queue an INVALID_STREAM error cause, but still mark
 * the TSN as received (nr-map) so the peer does not retransmit it.
 */
1726 if (sid >= asoc->streamincnt) {
1727 struct sctp_error_invalid_stream *cause;
1729 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1730 0, M_NOWAIT, 1, MT_DATA);
1731 if (op_err != NULL) {
1732 /* add some space up front so prepend will work well */
1733 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1734 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1736 * Error causes are just param's and this one has
1737 * two back to back phdr, one with the error type
1738 * and size, the other with the streamid and a rsvd
1740 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1741 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1742 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1743 cause->stream_id = htons(sid);
1744 cause->reserved = htons(0);
1745 sctp_queue_op_err(stcb, op_err);
1747 SCTP_STAT_INCR(sctps_badsid);
1748 SCTP_TCB_LOCK_ASSERT(stcb);
1749 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1750 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1751 asoc->highest_tsn_inside_nr_map = tsn;
1753 if (tsn == (asoc->cumulative_tsn + 1)) {
1754 /* Update cum-ack */
1755 asoc->cumulative_tsn = tsn;
1760 * If its a fragmented message, lets see if we can find the control
1761 * on the reassembly queues.
1763 if ((chk_type == SCTP_IDATA) &&
1764 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1767 * The first *must* be fsn 0, and other (middle/end) pieces
1768 * can *not* be fsn 0. XXX: This can happen in case of a
1769 * wrap around. Ignored for now.
1771 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1775 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1776 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1777 chk_flags, control);
/*
 * Fragmented chunk: any existing reassembly entry for this MID must be
 * consistent in MID and ordered/unordered-ness, else ABORT.
 */
1778 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1779 /* See if we can find the re-assembly entity */
1780 if (control != NULL) {
1781 /* We found something, does it belong? */
1782 if (ordered && (mid != control->mid)) {
1783 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1785 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1786 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1787 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1791 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1793 * We can't have a switched order with an
1796 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1800 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1802 * We can't have a switched unordered with a
1805 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1812 * Its a complete segment. Lets validate we don't have a
1813 * re-assembly going on with the same Stream/Seq (for
1814 * ordered) or in the same Stream for unordered.
1816 if (control != NULL) {
1817 if (ordered || asoc->idata_supported) {
1818 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1820 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1823 if ((tsn == control->fsn_included + 1) &&
1824 (control->end_added == 0)) {
1825 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1833 /* now do the tests */
/* Resource limits: too many queued chunks, or receive window exhausted. */
1834 if (((asoc->cnt_on_all_streams +
1835 asoc->cnt_on_reasm_queue +
1836 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1837 (((int)asoc->my_rwnd) <= 0)) {
1839 * When we have NO room in the rwnd we check to make sure
1840 * the reader is doing its job...
1842 if (stcb->sctp_socket->so_rcv.sb_cc) {
1843 /* some to read, wake-up */
1844 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1847 so = SCTP_INP_SO(stcb->sctp_ep);
1848 atomic_add_int(&stcb->asoc.refcnt, 1);
1849 SCTP_TCB_UNLOCK(stcb);
1850 SCTP_SOCKET_LOCK(so, 1);
1851 SCTP_TCB_LOCK(stcb);
1852 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1853 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1854 /* assoc was freed while we were unlocked */
1855 SCTP_SOCKET_UNLOCK(so, 1);
1859 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1860 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1861 SCTP_SOCKET_UNLOCK(so, 1);
1864 /* now is it in the mapping array of what we have accepted? */
1865 if (chk_type == SCTP_DATA) {
1866 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1867 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1868 /* Nope not in the valid range dump it */
1870 sctp_set_rwnd(stcb, asoc);
1871 if ((asoc->cnt_on_all_streams +
1872 asoc->cnt_on_reasm_queue +
1873 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1874 SCTP_STAT_INCR(sctps_datadropchklmt);
1876 SCTP_STAT_INCR(sctps_datadroprwnd);
1882 if (control == NULL) {
1885 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1890 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Optional per-association TSN trace ring (debug builds only). */
1891 SCTP_TCB_LOCK_ASSERT(stcb);
1892 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1893 asoc->tsn_in_at = 0;
1894 asoc->tsn_in_wrapped = 1;
1896 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1897 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1898 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1899 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1900 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1901 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1902 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1903 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1907 * Before we continue lets validate that we are not being fooled by
1908 * an evil attacker. We can only have Nk chunks based on our TSN
1909 * spread allowed by the mapping array N * 8 bits, so there is no
1910 * way our stream sequence numbers could have wrapped. We of course
1911 * only validate the FIRST fragment so the bit must be set.
1913 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1914 (TAILQ_EMPTY(&asoc->resetHead)) &&
1915 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1916 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1917 /* The incoming sseq is behind where we last delivered? */
1918 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1919 mid, asoc->strmin[sid].last_mid_delivered);
1921 if (asoc->idata_supported) {
1922 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1923 asoc->strmin[sid].last_mid_delivered,
1928 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1929 (uint16_t)asoc->strmin[sid].last_mid_delivered,
1934 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1935 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1936 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Compute payload length and obtain our own copy of the payload mbufs. */
1940 if (chk_type == SCTP_IDATA) {
1941 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1943 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1945 if (last_chunk == 0) {
1946 if (chk_type == SCTP_IDATA) {
1947 dmbuf = SCTP_M_COPYM(*m,
1948 (offset + sizeof(struct sctp_idata_chunk)),
1951 dmbuf = SCTP_M_COPYM(*m,
1952 (offset + sizeof(struct sctp_data_chunk)),
1955 #ifdef SCTP_MBUF_LOGGING
1956 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1957 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1961 /* We can steal the last chunk */
1965 /* lop off the top part */
1966 if (chk_type == SCTP_IDATA) {
1967 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1969 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1971 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1972 l_len = SCTP_BUF_LEN(dmbuf);
1975 * need to count up the size hopefully does not hit
1981 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1982 l_len += SCTP_BUF_LEN(lat);
1985 if (l_len > the_len) {
1986 /* Trim the end round bytes off too */
1987 m_adj(dmbuf, -(l_len - the_len));
1990 if (dmbuf == NULL) {
1991 SCTP_STAT_INCR(sctps_nomem);
1995 * Now no matter what, we need a control, get one if we don't have
1996 * one (we may have gotten it above when we found the message was
1999 if (control == NULL) {
2000 sctp_alloc_a_readq(stcb, control);
2001 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2006 if (control == NULL) {
2007 SCTP_STAT_INCR(sctps_nomem);
2010 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2011 control->data = dmbuf;
2012 control->tail_mbuf = NULL;
2013 control->end_added = 1;
2014 control->last_frag_seen = 1;
2015 control->first_frag_seen = 1;
2016 control->fsn_included = fsn;
2017 control->top_fsn = fsn;
2019 created_control = 1;
2021 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2022 chk_flags, ordered, mid, control);
/*
 * Express-delivery fast path: complete, no stream-reset pending, and
 * either unordered or the next in-order MID with an empty stream queue.
 */
2023 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2024 TAILQ_EMPTY(&asoc->resetHead) &&
2026 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2027 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2028 /* Candidate for express delivery */
2030 * Its not fragmented, No PD-API is up, Nothing in the
2031 * delivery queue, Its un-ordered OR ordered and the next to
2032 * deliver AND nothing else is stuck on the stream queue,
2033 * And there is room for it in the socket buffer. Lets just
2034 * stuff it up the buffer....
2036 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2037 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2038 asoc->highest_tsn_inside_nr_map = tsn;
2040 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2043 sctp_add_to_readq(stcb->sctp_ep, stcb,
2044 control, &stcb->sctp_socket->so_rcv,
2045 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2047 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2048 /* for ordered, bump what we delivered */
2049 asoc->strmin[sid].last_mid_delivered++;
2051 SCTP_STAT_INCR(sctps_recvexpress);
2052 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2053 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2054 SCTP_STR_LOG_FROM_EXPRS_DEL);
2057 goto finish_express_del;
2059 /* Now will we need a chunk too? */
2060 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2061 sctp_alloc_a_chunk(stcb, chk);
2063 /* No memory so we drop the chunk */
2064 SCTP_STAT_INCR(sctps_nomem);
2065 if (last_chunk == 0) {
2066 /* we copied it, free the copy */
2067 sctp_m_freem(dmbuf);
/* Populate the transmit-chunk record used by the reassembly code. */
2071 chk->rec.data.tsn = tsn;
2072 chk->no_fr_allowed = 0;
2073 chk->rec.data.fsn = fsn;
2074 chk->rec.data.mid = mid;
2075 chk->rec.data.sid = sid;
2076 chk->rec.data.ppid = ppid;
2077 chk->rec.data.context = stcb->asoc.context;
2078 chk->rec.data.doing_fast_retransmit = 0;
2079 chk->rec.data.rcv_flags = chk_flags;
2081 chk->send_size = the_len;
2083 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2086 atomic_add_int(&net->ref_count, 1);
2089 /* Set the appropriate TSN mark */
2090 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2091 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2092 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2093 asoc->highest_tsn_inside_nr_map = tsn;
2096 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2097 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2098 asoc->highest_tsn_inside_map = tsn;
2101 /* Now is it complete (i.e. not fragmented)? */
2102 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2104 * Special check for when streams are resetting. We could be
2105 * more smart about this and check the actual stream to see
2106 * if it is not being reset.. that way we would not create a
2107 * HOLB when amongst streams being reset and those not being
2111 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2112 SCTP_TSN_GT(tsn, liste->tsn)) {
2114 * yep its past where we need to reset... go ahead
2117 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2119 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2121 struct sctp_queued_to_read *ctlOn, *nctlOn;
2122 unsigned char inserted = 0;
/* Keep pending_reply_queue ordered by TSN. */
2124 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2125 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2130 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2135 if (inserted == 0) {
2137 * must be put at end, use prevP
2138 * (all setup from loop) to setup
2141 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2144 goto finish_express_del;
2146 if (chk_flags & SCTP_DATA_UNORDERED) {
2147 /* queue directly into socket buffer */
2148 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2150 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2151 sctp_add_to_readq(stcb->sctp_ep, stcb,
2153 &stcb->sctp_socket->so_rcv, 1,
2154 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2157 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2159 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2167 goto finish_express_del;
2169 /* If we reach here its a reassembly */
2170 need_reasm_check = 1;
2171 SCTPDBG(SCTP_DEBUG_XXX,
2172 "Queue data to stream for reasm control: %p MID: %u\n",
2174 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2177 * the assoc is now gone and chk was put onto the reasm
2178 * queue, which has all been freed.
/* finish_express_del label elided here by extraction: common tidy-up path. */
2186 /* Here we tidy up things */
2187 if (tsn == (asoc->cumulative_tsn + 1)) {
2188 /* Update cum-ack */
2189 asoc->cumulative_tsn = tsn;
2195 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2197 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2199 SCTP_STAT_INCR(sctps_recvdata);
2200 /* Set it present please */
2201 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2202 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2204 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2205 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2206 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2208 if (need_reasm_check) {
2209 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2210 need_reasm_check = 0;
2212 /* check the special flag for stream resets */
2213 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2214 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2216 * we have finished working through the backlogged TSN's now
2217 * time to reset streams. 1: call reset function. 2: free
2218 * pending_reply space 3: distribute any chunks in
2219 * pending_reply_queue.
2221 struct sctp_queued_to_read *ctl, *nctl;
2223 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2224 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2225 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2226 SCTP_FREE(liste, SCTP_M_STRESET);
2227 /* sa_ignore FREED_MEMORY */
2228 liste = TAILQ_FIRST(&asoc->resetHead);
2229 if (TAILQ_EMPTY(&asoc->resetHead)) {
2230 /* All can be removed */
2231 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2232 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2233 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag, &need_reasm_check);
2237 if (need_reasm_check) {
2238 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[ctl->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2239 need_reasm_check = 0;
2243 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2244 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2248 * if ctl->sinfo_tsn is <= liste->tsn we can
2249 * process it which is the NOT of
2250 * ctl->sinfo_tsn > liste->tsn
2252 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2253 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag, &need_reasm_check);
2257 if (need_reasm_check) {
2258 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[ctl->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2259 need_reasm_check = 0;
/*
 * Lookup table used when sliding the mapping arrays: for a byte value v,
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits starting at
 * bit 0 -- i.e. how many in-sequence TSNs that mapping-array byte holds
 * before its first gap (tab[0x01] = 1, tab[0x03] = 2, tab[0xff] = 8).
 * Consumers add it to the running bit position when the byte is not 0xff
 * ("there is a 0 bit").  NOTE(review): the closing "};" line was elided
 * by extraction (line-number prefixes jump past it).
 */
2267 static const int8_t sctp_map_lookup_tab[256] = {
2268 0, 1, 0, 2, 0, 1, 0, 3,
2269 0, 1, 0, 2, 0, 1, 0, 4,
2270 0, 1, 0, 2, 0, 1, 0, 3,
2271 0, 1, 0, 2, 0, 1, 0, 5,
2272 0, 1, 0, 2, 0, 1, 0, 3,
2273 0, 1, 0, 2, 0, 1, 0, 4,
2274 0, 1, 0, 2, 0, 1, 0, 3,
2275 0, 1, 0, 2, 0, 1, 0, 6,
2276 0, 1, 0, 2, 0, 1, 0, 3,
2277 0, 1, 0, 2, 0, 1, 0, 4,
2278 0, 1, 0, 2, 0, 1, 0, 3,
2279 0, 1, 0, 2, 0, 1, 0, 5,
2280 0, 1, 0, 2, 0, 1, 0, 3,
2281 0, 1, 0, 2, 0, 1, 0, 4,
2282 0, 1, 0, 2, 0, 1, 0, 3,
2283 0, 1, 0, 2, 0, 1, 0, 7,
2284 0, 1, 0, 2, 0, 1, 0, 3,
2285 0, 1, 0, 2, 0, 1, 0, 4,
2286 0, 1, 0, 2, 0, 1, 0, 3,
2287 0, 1, 0, 2, 0, 1, 0, 5,
2288 0, 1, 0, 2, 0, 1, 0, 3,
2289 0, 1, 0, 2, 0, 1, 0, 4,
2290 0, 1, 0, 2, 0, 1, 0, 3,
2291 0, 1, 0, 2, 0, 1, 0, 6,
2292 0, 1, 0, 2, 0, 1, 0, 3,
2293 0, 1, 0, 2, 0, 1, 0, 4,
2294 0, 1, 0, 2, 0, 1, 0, 3,
2295 0, 1, 0, 2, 0, 1, 0, 5,
2296 0, 1, 0, 2, 0, 1, 0, 3,
2297 0, 1, 0, 2, 0, 1, 0, 4,
2298 0, 1, 0, 2, 0, 1, 0, 3,
2299 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Advance the association's cumulative TSN over contiguously received
 * data (the OR of mapping_array and nr_mapping_array) and, when enough
 * leading bytes of the maps are fully set, slide both arrays down so the
 * base TSN moves forward and map space is reclaimed.  Called with the
 * TCB lock held (callers in this file assert it before invoking us).
 */
2304 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2307 * Now we also need to check the mapping array in a couple of ways.
2308 * 1) Did we move the cum-ack point?
2310 * When you first glance at this you might think that all entries
2311 * that make up the position of the cum-ack would be in the
2312 * nr-mapping array only.. i.e. things up to the cum-ack are always
2313 * deliverable. That's true with one exception, when it's a fragmented
2314 * message we may not deliver the data until some threshold (or all
2315 * of it) is in place. So we must OR the nr_mapping_array and
2316 * mapping_array to get a true picture of the cum-ack.
2318 struct sctp_association *asoc;
2321 int slide_from, slide_end, lgap, distance;
2322 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot for map logging before anything moves. */
2326 old_cumack = asoc->cumulative_tsn;
2327 old_base = asoc->mapping_array_base_tsn;
2328 old_highest = asoc->highest_tsn_inside_map;
2330 * We could probably improve this a small bit by calculating the
2331 * offset of the current cum-ack as the starting point.
/*
 * Scan the combined map from the base: count fully-set (0xff) bytes,
 * then use sctp_map_lookup_tab[] for the trailing partial byte.
 */
2334 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2335 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2339 /* there is a 0 bit */
2340 at += sctp_map_lookup_tab[val];
2344 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: the cum-ack can never move past both highest-TSN trackers. */
2346 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2347 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2349 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2350 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2352 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2353 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2354 sctp_print_mapping_array(asoc);
2355 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2356 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery: clamp both trackers to the computed cum-ack. */
2358 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2359 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2362 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2363 highest_tsn = asoc->highest_tsn_inside_nr_map;
2365 highest_tsn = asoc->highest_tsn_inside_map;
2367 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2368 /* The complete array was completed by a single FR */
2369 /* highest becomes the cum-ack */
2375 /* clear the array */
2376 clr = ((at + 7) >> 3);
2377 if (clr > asoc->mapping_array_size) {
2378 clr = asoc->mapping_array_size;
2380 memset(asoc->mapping_array, 0, clr);
2381 memset(asoc->nr_mapping_array, 0, clr);
/* Debug verification that the maps are really empty after the clear. */
2383 for (i = 0; i < asoc->mapping_array_size; i++) {
2384 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2385 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2386 sctp_print_mapping_array(asoc);
2390 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2391 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2392 } else if (at >= 8) {
2393 /* we can slide the mapping array down */
2394 /* slide_from holds where we hit the first NON 0xff byte */
2397 * now calculate the ceiling of the move using our highest
2400 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2401 slide_end = (lgap >> 3);
2402 if (slide_end < slide_from) {
2403 sctp_print_mapping_array(asoc);
2405 panic("impossible slide");
2407 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2408 lgap, slide_end, slide_from, at);
2412 if (slide_end > asoc->mapping_array_size) {
2414 panic("would overrun buffer");
2416 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2417 asoc->mapping_array_size, slide_end);
2418 slide_end = asoc->mapping_array_size;
2421 distance = (slide_end - slide_from) + 1;
2422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2423 sctp_log_map(old_base, old_cumack, old_highest,
2424 SCTP_MAP_PREPARE_SLIDE);
2425 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2426 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2428 if (distance + slide_from > asoc->mapping_array_size ||
2431 * Here we do NOT slide forward the array so that
2432 * hopefully when more data comes in to fill it up
2433 * we will be able to slide it forward. Really I
2434 * don't think this should happen :-0
2437 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2438 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2439 (uint32_t)asoc->mapping_array_size,
2440 SCTP_MAP_SLIDE_NONE);
/* Copy the live tail of both maps down to the front ... */
2445 for (ii = 0; ii < distance; ii++) {
2446 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2447 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ... and zero everything beyond what was copied. */
2450 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2451 asoc->mapping_array[ii] = 0;
2452 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep an "empty map" (highest == base - 1) invariant intact by
 * advancing the highest-TSN trackers with the base when they were
 * exactly one behind it.  slide_from bytes == slide_from*8 TSNs.
 */
2454 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2455 asoc->highest_tsn_inside_map += (slide_from << 3);
2457 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2458 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2460 asoc->mapping_array_base_tsn += (slide_from << 3);
2461 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2462 sctp_log_map(asoc->mapping_array_base_tsn,
2463 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2464 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether a SACK must be sent now or whether the delayed-ack
 * timer should (re)start.  was_a_gap says whether a gap existed before
 * this packet was processed; we recompute is_a_gap after sliding the
 * mapping arrays.  In SHUTDOWN-SENT state we bypass the timer entirely
 * and emit a SHUTDOWN plus an immediate SACK.
 */
2471 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2473 struct sctp_association *asoc;
2474 uint32_t highest_tsn;
2477 sctp_slide_mapping_arrays(stcb);
2479 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2480 highest_tsn = asoc->highest_tsn_inside_nr_map;
2482 highest_tsn = asoc->highest_tsn_inside_map;
2484 /* Is there a gap now? */
2485 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2488 * Now we need to see if we need to queue a sack or just start the
2489 * timer (if allowed).
2491 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2493 * Ok special case, in SHUTDOWN-SENT case. here we make
2494 * sure SACK timer is off and instead send a SHUTDOWN and a
2497 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2498 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2499 stcb->sctp_ep, stcb, NULL,
2500 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2502 sctp_send_shutdown(stcb,
2503 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2505 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2509 * CMT DAC algorithm: increase number of packets received
2512 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any one of these conditions forces an immediate SACK decision. */
2514 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2516 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2518 (stcb->asoc.numduptsns) || /* we have dup's */
2519 (is_a_gap) || /* is still a gap */
2520 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2521 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2524 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2525 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2526 (stcb->asoc.send_sack == 0) &&
2527 (stcb->asoc.numduptsns == 0) &&
2528 (stcb->asoc.delayed_ack) &&
2529 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2532 * CMT DAC algorithm: With CMT, delay acks
2533 * even in the face of
2535 * reordering. Therefore, if acks that do
2536 * not have to be sent because of the above
2537 * reasons, will be delayed. That is, acks
2538 * that would have been sent due to gap
2539 * reports will be delayed with DAC. Start
2540 * the delayed ack timer.
2542 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2543 stcb->sctp_ep, stcb, NULL);
2546 * Ok we must build a SACK since the timer
2547 * is pending, we got our first packet OR
2548 * there are gaps or duplicates.
2550 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2551 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise just arm the delayed-ack timer if it is not running. */
2554 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2555 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2556 stcb->sctp_ep, stcb, NULL);
/*
 * Walk every chunk in the DATA portion of an inbound SCTP packet.
 * DATA/I-DATA chunks are handed to sctp_process_a_data_chunk(); control
 * chunk types found after data are either ignored, aborted on, or (for
 * unknown types) handled per the upper-two-bit rules of the chunk type.
 * On exit, reports rwnd-overrun drops if any and kicks off SACK
 * generation via sctp_sack_check().  Called with the TCB lock held
 * (asserted below).  *mm may be replaced if the packet is recopied out
 * of a wastefully large cluster mbuf.
 */
2563 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2564 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2565 struct sctp_nets *net, uint32_t *high_tsn)
2567 struct sctp_chunkhdr *ch, chunk_buf;
2568 struct sctp_association *asoc;
2569 int num_chunks = 0; /* number of control chunks processed */
2571 int chk_length, break_flag, last_chunk;
2572 int abort_flag = 0, was_a_gap;
2574 uint32_t highest_tsn;
2577 sctp_set_rwnd(stcb, &stcb->asoc);
2580 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap already existed before this packet. */
2582 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2583 highest_tsn = asoc->highest_tsn_inside_nr_map;
2585 highest_tsn = asoc->highest_tsn_inside_map;
2587 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2589 * setup where we got the last DATA packet from for any SACK that
2590 * may need to go out. Don't bump the net. This is done ONLY when a
2591 * chunk is assigned.
2593 asoc->last_data_chunk_from = net;
2596 * Now before we proceed we must figure out if this is a wasted
2597 * cluster... i.e. it is a small packet sent in and yet the driver
2598 * underneath allocated a full cluster for it. If so we must copy it
2599 * to a smaller mbuf and free up the cluster mbuf. This will help
2600 * with cluster starvation. Note for __Panda__ we don't do this
2601 * since it has clusters all the way down to 64 bytes.
2603 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2604 /* we only handle mbufs that are singletons.. not chains */
2605 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2607 /* ok lets see if we can copy the data up */
2610 /* get the pointers and copy */
2611 to = mtod(m, caddr_t *);
2612 from = mtod((*mm), caddr_t *);
2613 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2614 /* copy the length and free up the old */
2615 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2617 /* success, back copy */
2620 /* We are in trouble in the mbuf world .. yikes */
2624 /* get pointer to the first chunk header */
2625 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2626 sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2631 * process all DATA chunks...
2633 *high_tsn = asoc->cumulative_tsn;
2635 asoc->data_pkts_seen++;
2636 while (stop_proc == 0) {
2637 /* validate chunk length */
2638 chk_length = ntohs(ch->chunk_length);
2639 if (length - *offset < chk_length) {
2640 /* all done, mutilated chunk */
/*
 * Mixing DATA and I-DATA contrary to what was negotiated is a
 * protocol violation -> abort the association (RFC 8260).
 */
2644 if ((asoc->idata_supported == 1) &&
2645 (ch->chunk_type == SCTP_DATA)) {
2646 struct mbuf *op_err;
2647 char msg[SCTP_DIAG_INFO_LEN];
2649 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2650 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2651 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2652 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2655 if ((asoc->idata_supported == 0) &&
2656 (ch->chunk_type == SCTP_IDATA)) {
2657 struct mbuf *op_err;
2658 char msg[SCTP_DIAG_INFO_LEN];
2660 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2661 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2662 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2663 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2666 if ((ch->chunk_type == SCTP_DATA) ||
2667 (ch->chunk_type == SCTP_IDATA)) {
2670 if (ch->chunk_type == SCTP_DATA) {
2671 clen = sizeof(struct sctp_data_chunk);
2673 clen = sizeof(struct sctp_idata_chunk);
2675 if (chk_length < clen) {
2677 * Need to send an abort since we had a
2678 * invalid data chunk.
2680 struct mbuf *op_err;
2681 char msg[SCTP_DIAG_INFO_LEN];
2683 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2685 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2686 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2687 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2690 #ifdef SCTP_AUDITING_ENABLED
2691 sctp_audit_log(0xB1, 0);
2693 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2698 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2699 chk_length, net, high_tsn, &abort_flag, &break_flag,
2700 last_chunk, ch->chunk_type)) {
2708 * Set because of out of rwnd space and no
2709 * drop rep space left.
2715 /* not a data chunk in the data region */
2716 switch (ch->chunk_type) {
2717 case SCTP_INITIATION:
2718 case SCTP_INITIATION_ACK:
2719 case SCTP_SELECTIVE_ACK:
2720 case SCTP_NR_SELECTIVE_ACK:
2721 case SCTP_HEARTBEAT_REQUEST:
2722 case SCTP_HEARTBEAT_ACK:
2723 case SCTP_ABORT_ASSOCIATION:
2725 case SCTP_SHUTDOWN_ACK:
2726 case SCTP_OPERATION_ERROR:
2727 case SCTP_COOKIE_ECHO:
2728 case SCTP_COOKIE_ACK:
2731 case SCTP_SHUTDOWN_COMPLETE:
2732 case SCTP_AUTHENTICATION:
2733 case SCTP_ASCONF_ACK:
2734 case SCTP_PACKET_DROPPED:
2735 case SCTP_STREAM_RESET:
2736 case SCTP_FORWARD_CUM_TSN:
2740 * Now, what do we do with KNOWN
2741 * chunks that are NOT in the right
2744 * For now, I do nothing but ignore
2745 * them. We may later want to add
2746 * sysctl stuff to switch out and do
2747 * either an ABORT() or possibly
2750 struct mbuf *op_err;
2751 char msg[SCTP_DIAG_INFO_LEN];
2753 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2755 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2756 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2760 /* unknown chunk type, use bit rules */
2761 if (ch->chunk_type & 0x40) {
2762 /* Add a error report to the queue */
2763 struct mbuf *op_err;
2764 struct sctp_gen_error_cause *cause;
2766 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2767 0, M_NOWAIT, 1, MT_DATA);
2768 if (op_err != NULL) {
2769 cause = mtod(op_err, struct sctp_gen_error_cause *);
2770 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2771 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2772 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2773 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2774 if (SCTP_BUF_NEXT(op_err) != NULL) {
2775 sctp_queue_op_err(stcb, op_err);
2777 sctp_m_freem(op_err);
2781 if ((ch->chunk_type & 0x80) == 0) {
2782 /* discard the rest of this packet */
2784 } /* else skip this bad chunk and
2787 } /* switch of chunk type */
2789 *offset += SCTP_SIZE32(chk_length);
2790 if ((*offset >= length) || stop_proc) {
2791 /* no more data left in the mbuf chain */
2795 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2796 sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2805 * we need to report rwnd overrun drops.
2807 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2811 * Did we get data, if so update the time for auto-close and
2812 * give peer credit for being alive.
2814 SCTP_STAT_INCR(sctps_recvpktwithdata);
2815 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2816 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2817 stcb->asoc.overall_error_count,
2819 SCTP_FROM_SCTP_INDATA,
2822 stcb->asoc.overall_error_count = 0;
2823 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2825 /* now service all of the reassm queue if needed */
2826 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2827 /* Assure that we ack right away */
2828 stcb->asoc.send_sack = 1;
2830 /* Start a sack timer or QUEUE a SACK for sending */
2831 sctp_sack_check(stcb, was_a_gap);
/*
 * Process one gap-ack block [frag_strt, frag_end] (offsets relative to
 * last_tsn) against the sent queue: mark covered chunks acked, update
 * flight size, RTO sampling, CMT pseudo-cumack/SFR/DAC bookkeeping, and
 * (for nr_sacking) NR-ACK state.  *p_tp1 carries the scan position
 * between calls so in-order blocks resume where the previous one left
 * off; out-of-order blocks restart from the queue head (with one wrap,
 * tracked by 'circled').  Returns nonzero if a reader should be woken
 * (only meaningful for nr-sack processing, see the return comment).
 */
2836 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2837 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2839 uint32_t *biggest_newly_acked_tsn,
2840 uint32_t *this_sack_lowest_newack,
2843 struct sctp_tmit_chunk *tp1;
2844 unsigned int theTSN;
2845 int j, wake_him = 0, circled = 0;
2847 /* Recover the tp1 we last saw */
2850 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2852 for (j = frag_strt; j <= frag_end; j++) {
2853 theTSN = j + last_tsn;
2855 if (tp1->rec.data.doing_fast_retransmit)
2859 * CMT: CUCv2 algorithm. For each TSN being
2860 * processed from the sent queue, track the
2861 * next expected pseudo-cumack, or
2862 * rtx_pseudo_cumack, if required. Separate
2863 * cumack trackers for first transmissions,
2864 * and retransmissions.
2866 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2867 (tp1->whoTo->find_pseudo_cumack == 1) &&
2868 (tp1->snd_count == 1)) {
2869 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2870 tp1->whoTo->find_pseudo_cumack = 0;
2872 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2873 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2874 (tp1->snd_count > 1)) {
2875 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2876 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2878 if (tp1->rec.data.tsn == theTSN) {
2879 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2881 * must be held until
2884 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2886 * If it is less than RESEND, it is
2887 * now no-longer in flight.
2888 * Higher values may already be set
2889 * via previous Gap Ack Blocks...
2890 * i.e. ACKED or RESEND.
2892 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2893 *biggest_newly_acked_tsn)) {
2894 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
2897 * CMT: SFR algo (and HTNA) - set
2898 * saw_newack to 1 for dest being
2899 * newly acked. update
2900 * this_sack_highest_newack if
2903 if (tp1->rec.data.chunk_was_revoked == 0)
2904 tp1->whoTo->saw_newack = 1;
2906 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2907 tp1->whoTo->this_sack_highest_newack)) {
2908 tp1->whoTo->this_sack_highest_newack =
2912 * CMT DAC algo: also update
2913 * this_sack_lowest_newack
2915 if (*this_sack_lowest_newack == 0) {
2916 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2917 sctp_log_sack(*this_sack_lowest_newack,
2922 SCTP_LOG_TSN_ACKED);
2924 *this_sack_lowest_newack = tp1->rec.data.tsn;
2927 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2928 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2929 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2930 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2931 * Separate pseudo_cumack trackers for first transmissions and
2934 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
2935 if (tp1->rec.data.chunk_was_revoked == 0) {
2936 tp1->whoTo->new_pseudo_cumack = 1;
2938 tp1->whoTo->find_pseudo_cumack = 1;
2940 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2941 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
2943 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
2944 if (tp1->rec.data.chunk_was_revoked == 0) {
2945 tp1->whoTo->new_pseudo_cumack = 1;
2947 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2950 sctp_log_sack(*biggest_newly_acked_tsn,
2955 SCTP_LOG_TSN_ACKED);
2957 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2958 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2959 tp1->whoTo->flight_size,
2961 (uint32_t)(uintptr_t)tp1->whoTo,
/* Newly gap-acked: take it out of flight accounting. */
2964 sctp_flight_size_decrease(tp1);
2965 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2966 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2969 sctp_total_flight_decrease(stcb, tp1);
2971 tp1->whoTo->net_ack += tp1->send_size;
2972 if (tp1->snd_count < 2) {
2974 * True non-retransmitted chunk
2976 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample is only valid for never-retransmitted chunks. */
2984 sctp_calculate_rto(stcb,
2987 &tp1->sent_rcv_time,
2988 sctp_align_safe_nocopy,
2989 SCTP_RTT_FROM_DATA);
2992 if (tp1->whoTo->rto_needed == 0) {
2993 tp1->whoTo->rto_needed = 1;
2999 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3000 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3001 stcb->asoc.this_sack_highest_gap)) {
3002 stcb->asoc.this_sack_highest_gap =
3005 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3006 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3007 #ifdef SCTP_AUDITING_ENABLED
3008 sctp_audit_log(0xB2,
3009 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3014 * All chunks NOT UNSENT fall through here and are marked
3015 * (leave PR-SCTP ones that are to skip alone though)
3017 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3018 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3019 tp1->sent = SCTP_DATAGRAM_MARKED;
3021 if (tp1->rec.data.chunk_was_revoked) {
3022 /* deflate the cwnd */
3023 tp1->whoTo->cwnd -= tp1->book_size;
3024 tp1->rec.data.chunk_was_revoked = 0;
3026 /* NR Sack code here */
3028 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3029 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3030 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3033 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* Last chunk drained from a reset-pending stream: arm the reset. */
3036 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3037 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3038 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3039 stcb->asoc.trigger_reset = 1;
3041 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3047 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3048 sctp_m_freem(tp1->data);
3055 } /* if (tp1->tsn == theTSN) */
3056 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3059 tp1 = TAILQ_NEXT(tp1, sctp_next);
3060 if ((tp1 == NULL) && (circled == 0)) {
3062 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3064 } /* end while (tp1) */
3067 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3069 /* In case the fragments were not in order we must reset */
3070 } /* end for (j = fragStart */
3072 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over the num_seg revocable and num_nr_seg non-revocable
 * gap-ack blocks of a (NR-)SACK chunk in mbuf m at *offset.  Each block
 * is validated (start <= end; blocks in ascending order or the sent
 * queue scan restarts) and handed to sctp_process_segment_range().
 * Updates *biggest_tsn_acked to the highest TSN covered by any block.
 * Returns nonzero if any chunk was freed (NR-ACKed) during processing.
 */
3077 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3078 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3079 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3080 int num_seg, int num_nr_seg, int *rto_ok)
3082 struct sctp_gap_ack_block *frag, block;
3083 struct sctp_tmit_chunk *tp1;
3088 uint16_t frag_strt, frag_end, prev_frag_end;
3090 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3094 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3097 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap-ack block out of the SACK chunk. */
3099 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3100 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3101 *offset += sizeof(block);
3103 return (chunk_freed);
3105 frag_strt = ntohs(frag->start);
3106 frag_end = ntohs(frag->end);
3108 if (frag_strt > frag_end) {
3109 /* This gap report is malformed, skip it. */
3112 if (frag_strt <= prev_frag_end) {
3113 /* This gap report is not in order, so restart. */
3114 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3116 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3117 *biggest_tsn_acked = last_tsn + frag_end;
3124 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3125 non_revocable, &num_frs, biggest_newly_acked_tsn,
3126 this_sack_lowest_newack, rto_ok)) {
3129 prev_frag_end = frag_end;
3131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3133 sctp_log_fr(*biggest_tsn_acked,
3134 *biggest_newly_acked_tsn,
3135 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3137 return (chunk_freed);
/*
 * Scan the sent queue above the cumulative ack for TSNs that a previous
 * SACK gap-acked but the current SACK no longer covers (<=
 * biggest_tsn_acked): those acks were "revoked", so the chunk goes back
 * to SENT state, re-enters flight accounting, and the cwnd is inflated
 * to compensate (deflated again when the chunk is re-acked or re-sent).
 * Chunks re-acked by this SACK are promoted from MARKED to ACKED.
 */
3141 sctp_check_for_revoked(struct sctp_tcb *stcb,
3142 struct sctp_association *asoc, uint32_t cumack,
3143 uint32_t biggest_tsn_acked)
3145 struct sctp_tmit_chunk *tp1;
3147 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3148 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3150 * ok this guy is either ACK or MARKED. If it is
3151 * ACKED it has been previously acked but not this
3152 * time i.e. revoked. If it is MARKED it was ACK'ed
3155 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3158 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3159 /* it has been revoked */
3160 tp1->sent = SCTP_DATAGRAM_SENT;
3161 tp1->rec.data.chunk_was_revoked = 1;
3163 * We must add this stuff back in to assure
3164 * timers and such get started.
3166 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3167 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3168 tp1->whoTo->flight_size,
3170 (uint32_t)(uintptr_t)tp1->whoTo,
3173 sctp_flight_size_increase(tp1);
3174 sctp_total_flight_increase(stcb, tp1);
3176 * We inflate the cwnd to compensate for our
3177 * artificial inflation of the flight_size.
3179 tp1->whoTo->cwnd += tp1->book_size;
3180 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3181 sctp_log_sack(asoc->last_acked_seq,
3186 SCTP_LOG_TSN_REVOKED);
3188 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3189 /* it has been re-acked in this SACK */
3190 tp1->sent = SCTP_DATAGRAM_ACKED;
3193 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3200 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3201 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3203 struct sctp_tmit_chunk *tp1;
3204 int strike_flag = 0;
3206 int tot_retrans = 0;
3207 uint32_t sending_seq;
3208 struct sctp_nets *net;
3209 int num_dests_sacked = 0;
3212 * select the sending_seq, this is either the next thing ready to be
3213 * sent but not transmitted, OR, the next seq we assign.
3215 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3217 sending_seq = asoc->sending_seq;
3219 sending_seq = tp1->rec.data.tsn;
3222 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3223 if ((asoc->sctp_cmt_on_off > 0) &&
3224 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3225 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3226 if (net->saw_newack)
3230 if (stcb->asoc.prsctp_supported) {
3231 (void)SCTP_GETTIME_TIMEVAL(&now);
3233 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3235 if (tp1->no_fr_allowed) {
3236 /* this one had a timeout or something */
3239 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3240 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3241 sctp_log_fr(biggest_tsn_newly_acked,
3244 SCTP_FR_LOG_CHECK_STRIKE);
3246 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3247 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3251 if (stcb->asoc.prsctp_supported) {
3252 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3253 /* Is it expired? */
3254 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3255 /* Yes so drop it */
3256 if (tp1->data != NULL) {
3257 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3258 SCTP_SO_NOT_LOCKED);
3264 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3265 /* we are beyond the tsn in the sack */
3268 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3269 /* either a RESEND, ACKED, or MARKED */
3271 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3272 /* Continue strikin FWD-TSN chunks */
3273 tp1->rec.data.fwd_tsn_cnt++;
3278 * CMT : SFR algo (covers part of DAC and HTNA as well)
3280 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3282 * No new acks were receieved for data sent to this
3283 * dest. Therefore, according to the SFR algo for
3284 * CMT, no data sent to this dest can be marked for
3285 * FR using this SACK.
3288 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3289 tp1->whoTo->this_sack_highest_newack)) {
3291 * CMT: New acks were receieved for data sent to
3292 * this dest. But no new acks were seen for data
3293 * sent after tp1. Therefore, according to the SFR
3294 * algo for CMT, tp1 cannot be marked for FR using
3295 * this SACK. This step covers part of the DAC algo
3296 * and the HTNA algo as well.
3301 * Here we check to see if we were have already done a FR
3302 * and if so we see if the biggest TSN we saw in the sack is
3303 * smaller than the recovery point. If so we don't strike
3304 * the tsn... otherwise we CAN strike the TSN.
3307 * @@@ JRI: Check for CMT if (accum_moved &&
3308 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3311 if (accum_moved && asoc->fast_retran_loss_recovery) {
3313 * Strike the TSN if in fast-recovery and cum-ack
3316 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3317 sctp_log_fr(biggest_tsn_newly_acked,
3320 SCTP_FR_LOG_STRIKE_CHUNK);
3322 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3325 if ((asoc->sctp_cmt_on_off > 0) &&
3326 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3328 * CMT DAC algorithm: If SACK flag is set to
3329 * 0, then lowest_newack test will not pass
3330 * because it would have been set to the
3331 * cumack earlier. If not already to be
3332 * rtx'd, If not a mixed sack and if tp1 is
3333 * not between two sacked TSNs, then mark by
3334 * one more. NOTE that we are marking by one
3335 * additional time since the SACK DAC flag
3336 * indicates that two packets have been
3337 * received after this missing TSN.
3339 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3340 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3342 sctp_log_fr(16 + num_dests_sacked,
3345 SCTP_FR_LOG_STRIKE_CHUNK);
3350 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3351 (asoc->sctp_cmt_on_off == 0)) {
3353 * For those that have done a FR we must take
3354 * special consideration if we strike. I.e the
3355 * biggest_newly_acked must be higher than the
3356 * sending_seq at the time we did the FR.
3359 #ifdef SCTP_FR_TO_ALTERNATE
3361 * If FR's go to new networks, then we must only do
3362 * this for singly homed asoc's. However if the FR's
3363 * go to the same network (Armando's work) then its
3364 * ok to FR multiple times.
3372 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3373 tp1->rec.data.fast_retran_tsn)) {
3375 * Strike the TSN, since this ack is
3376 * beyond where things were when we
3379 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3380 sctp_log_fr(biggest_tsn_newly_acked,
3383 SCTP_FR_LOG_STRIKE_CHUNK);
3385 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3389 if ((asoc->sctp_cmt_on_off > 0) &&
3390 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3392 * CMT DAC algorithm: If
3393 * SACK flag is set to 0,
3394 * then lowest_newack test
3395 * will not pass because it
3396 * would have been set to
3397 * the cumack earlier. If
3398 * not already to be rtx'd,
3399 * If not a mixed sack and
3400 * if tp1 is not between two
3401 * sacked TSNs, then mark by
3402 * one more. NOTE that we
3403 * are marking by one
3404 * additional time since the
3405 * SACK DAC flag indicates
3406 * that two packets have
3407 * been received after this
3410 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3411 (num_dests_sacked == 1) &&
3412 SCTP_TSN_GT(this_sack_lowest_newack,
3413 tp1->rec.data.tsn)) {
3414 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3415 sctp_log_fr(32 + num_dests_sacked,
3418 SCTP_FR_LOG_STRIKE_CHUNK);
3420 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3428 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3431 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3432 biggest_tsn_newly_acked)) {
3434 * We don't strike these: This is the HTNA
3435 * algorithm i.e. we don't strike If our TSN is
3436 * larger than the Highest TSN Newly Acked.
3440 /* Strike the TSN */
3441 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3442 sctp_log_fr(biggest_tsn_newly_acked,
3445 SCTP_FR_LOG_STRIKE_CHUNK);
3447 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3450 if ((asoc->sctp_cmt_on_off > 0) &&
3451 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3453 * CMT DAC algorithm: If SACK flag is set to
3454 * 0, then lowest_newack test will not pass
3455 * because it would have been set to the
3456 * cumack earlier. If not already to be
3457 * rtx'd, If not a mixed sack and if tp1 is
3458 * not between two sacked TSNs, then mark by
3459 * one more. NOTE that we are marking by one
3460 * additional time since the SACK DAC flag
3461 * indicates that two packets have been
3462 * received after this missing TSN.
3464 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3465 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3466 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3467 sctp_log_fr(48 + num_dests_sacked,
3470 SCTP_FR_LOG_STRIKE_CHUNK);
3476 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3477 struct sctp_nets *alt;
3479 /* fix counts and things */
3480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3481 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3482 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3484 (uint32_t)(uintptr_t)tp1->whoTo,
3488 tp1->whoTo->net_ack++;
3489 sctp_flight_size_decrease(tp1);
3490 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3491 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3495 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3496 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3497 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3499 /* add back to the rwnd */
3500 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3502 /* remove from the total flight */
3503 sctp_total_flight_decrease(stcb, tp1);
3505 if ((stcb->asoc.prsctp_supported) &&
3506 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3508 * Has it been retransmitted tv_sec times? -
3509 * we store the retran count there.
3511 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3512 /* Yes, so drop it */
3513 if (tp1->data != NULL) {
3514 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3515 SCTP_SO_NOT_LOCKED);
3517 /* Make sure to flag we had a FR */
3518 tp1->whoTo->net_ack++;
3523 * SCTP_PRINTF("OK, we are now ready to FR this
3526 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3527 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3531 /* This is a subsequent FR */
3532 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3534 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3535 if (asoc->sctp_cmt_on_off > 0) {
3537 * CMT: Using RTX_SSTHRESH policy for CMT.
3538 * If CMT is being used, then pick dest with
3539 * largest ssthresh for any retransmission.
3541 tp1->no_fr_allowed = 1;
3543 /* sa_ignore NO_NULL_CHK */
3544 if (asoc->sctp_cmt_pf > 0) {
3546 * JRS 5/18/07 - If CMT PF is on,
3547 * use the PF version of
3550 alt = sctp_find_alternate_net(stcb, alt, 2);
3553 * JRS 5/18/07 - If only CMT is on,
3554 * use the CMT version of
3557 /* sa_ignore NO_NULL_CHK */
3558 alt = sctp_find_alternate_net(stcb, alt, 1);
3564 * CUCv2: If a different dest is picked for
3565 * the retransmission, then new
3566 * (rtx-)pseudo_cumack needs to be tracked
3567 * for orig dest. Let CUCv2 track new (rtx-)
3568 * pseudo-cumack always.
3571 tp1->whoTo->find_pseudo_cumack = 1;
3572 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3574 } else {/* CMT is OFF */
3576 #ifdef SCTP_FR_TO_ALTERNATE
3577 /* Can we find an alternate? */
3578 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3581 * default behavior is to NOT retransmit
3582 * FR's to an alternate. Armando Caro's
3583 * paper details why.
3589 tp1->rec.data.doing_fast_retransmit = 1;
3591 /* mark the sending seq for possible subsequent FR's */
3593 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3594 * (uint32_t)tpi->rec.data.tsn);
3596 if (TAILQ_EMPTY(&asoc->send_queue)) {
3598 * If the queue of send is empty then its
3599 * the next sequence number that will be
3600 * assigned so we subtract one from this to
3601 * get the one we last sent.
3603 tp1->rec.data.fast_retran_tsn = sending_seq;
3606 * If there are chunks on the send queue
3607 * (unsent data that has made it from the
3608 * stream queues but not out the door, we
3609 * take the first one (which will have the
3610 * lowest TSN) and subtract one to get the
3613 struct sctp_tmit_chunk *ttt;
3615 ttt = TAILQ_FIRST(&asoc->send_queue);
3616 tp1->rec.data.fast_retran_tsn =
3622 * this guy had a RTO calculation pending on
3625 if ((tp1->whoTo != NULL) &&
3626 (tp1->whoTo->rto_needed == 0)) {
3627 tp1->whoTo->rto_needed = 1;
3631 if (alt != tp1->whoTo) {
3632 /* yes, there is an alternate. */
3633 sctp_free_remote_addr(tp1->whoTo);
3634 /* sa_ignore FREED_MEMORY */
3636 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP: walk the sent queue in TSN order and try to move
 * asoc->advanced_peer_ack_point forward past chunks that may be
 * abandoned (marked SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED)
 * or whose PR-SCTP TTL has expired while marked for resend.  The walk
 * stops at the first chunk that must still be delivered reliably.
 * NOTE(review): the return statement lies outside this excerpt; a_adv
 * appears to be the chunk at the new advance point (used by the caller
 * to build a FORWARD-TSN) -- confirm against the full source.
 */
3642 struct sctp_tmit_chunk *
3643 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3644 struct sctp_association *asoc)
3646 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Without PR-SCTP negotiated nothing can ever be skipped. */
3650 if (asoc->prsctp_supported == 0) {
3653 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3654 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3655 tp1->sent != SCTP_DATAGRAM_RESEND &&
3656 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3657 /* no chance to advance, out of here */
3660 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3661 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3662 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3663 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3664 asoc->advanced_peer_ack_point,
3665 tp1->rec.data.tsn, 0, 0);
3668 if (!PR_SCTP_ENABLED(tp1->flags)) {
3670 * We can't fwd-tsn past any that are reliable aka
3671 * retransmitted until the asoc fails.
/* Sample the current time once for the TTL comparison below. */
3676 (void)SCTP_GETTIME_TIMEVAL(&now);
3680 * now we got a chunk which is marked for another
3681 * retransmission to a PR-stream but has run out its chances
3682 * already maybe OR has been marked to skip now. Can we skip
3683 * it if its a resend?
3685 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3686 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3688 * Now is this one marked for resend and its time is
3691 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3692 /* Yes so drop it */
/* Abandon the chunk (releases its data and notifies as needed). */
3694 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3695 1, SCTP_SO_NOT_LOCKED);
3699 * No, we are done when hit one for resend
3700 * whos time as not expired.
3706 * Ok now if this chunk is marked to drop it we can clean up
3707 * the chunk, advance our peer ack point and we can check
3710 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3711 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3712 /* advance PeerAckPoint goes forward */
3713 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3714 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3716 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3717 /* No update but we do save the chk */
3722 * If it is still in RESEND we can advance no
/*
 * Debug/audit helper: classify every chunk on the sent queue by its
 * "sent" state (in flight, marked for resend, between resend and acked,
 * acked, above acked) and cross-check against the association's cached
 * flight-size totals.  Any chunk still counted in flight or "in between"
 * indicates corrupted accounting.
 * NOTE(review): the return type/statements are on lines elided from this
 * excerpt; callers (see the sctp_fs_audit() call in the express SACK
 * path) treat a non-zero result as "inconsistent" -- confirm in full
 * source.
 */
3732 sctp_fs_audit(struct sctp_association *asoc)
3734 struct sctp_tmit_chunk *chk;
/* One counter per chunk-state bucket observed on the sent queue. */
3735 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3738 int entry_flight, entry_cnt;
/* Snapshot the entry totals so they can be reported if the audit fails. */
3743 entry_flight = asoc->total_flight;
3744 entry_cnt = asoc->total_flight_count;
/* Everything outstanding is PR-SCTP: audit is skipped (body elided). */
3746 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3749 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3750 if (chk->sent < SCTP_DATAGRAM_RESEND) {
/* Chunk still counted as in flight: log it for the audit trail. */
3751 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3756 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3758 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3760 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3767 if ((inflight > 0) || (inbetween > 0)) {
/*
 * Inconsistency found: panic or just print, depending on a
 * compile-time option whose #ifdef lines are elided here.
 */
3769 panic("Flight size-express incorrect? \n");
3771 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3772 entry_flight, entry_cnt);
3774 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3775 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a window probe: clear its probe flag
 * and, unless the chunk was already acked/abandoned (sent >= ACKED or
 * data == NULL), remove it from the flight-size accounting and mark it
 * SCTP_DATAGRAM_RESEND so it goes back through the normal send path.
 */
3784 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3785 struct sctp_association *asoc,
3786 struct sctp_tmit_chunk *tp1)
3788 tp1->window_probe = 0;
3789 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3790 /* TSN's skipped we do NOT move back. */
3791 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3792 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3794 (uint32_t)(uintptr_t)tp1->whoTo,
3798 /* First setup this by shrinking flight */
/* Let the CC module account for the TSN leaving the network first. */
3799 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3800 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3803 sctp_flight_size_decrease(tp1);
3804 sctp_total_flight_decrease(stcb, tp1);
3805 /* Now mark for resend */
3806 tp1->sent = SCTP_DATAGRAM_RESEND;
3807 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3809 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3810 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3811 tp1->whoTo->flight_size,
3813 (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * Express (fast-path) SACK handler: used when a SACK carries only a new
 * cumulative ack and rwnd (no gap-ack blocks to process).  Frees every
 * chunk on the sent queue covered by "cumack", updates per-net
 * congestion-control state and RTO, restarts/stops T3 timers, performs
 * window-probe recovery, drives shutdown progression when the queues
 * drain, and advances the PR-SCTP advanced-peer-ack-point (sending a
 * FORWARD-TSN when it moved).  Aborts the association on a protocol
 * violation (cum-ack covering a TSN never sent).
 * NOTE(review): several locals used below (old_rwnd, send_s, to_ticks,
 * so, j, ...) are declared on lines elided from this excerpt.
 */
3819 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3820 uint32_t rwnd, int *abort_now, int ecne_seen)
3822 struct sctp_nets *net;
3823 struct sctp_association *asoc;
3824 struct sctp_tmit_chunk *tp1, *tp2;
3826 int win_probe_recovery = 0;
3827 int win_probe_recovered = 0;
3828 int j, done_once = 0;
3832 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3833 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3834 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3836 SCTP_TCB_LOCK_ASSERT(stcb);
3837 #ifdef SCTP_ASOCLOG_OF_TSNS
3838 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3839 stcb->asoc.cumack_log_at++;
3840 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3841 stcb->asoc.cumack_log_at = 0;
/*
 * An old (smaller) cumack is discarded; an equal cumack is a pure
 * window-update SACK: refresh peers_rwnd only.
 */
3845 old_rwnd = asoc->peers_rwnd;
3846 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3849 } else if (asoc->last_acked_seq == cumack) {
3850 /* Window update sack */
3851 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3852 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3853 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3854 /* SWS sender side engages */
3855 asoc->peers_rwnd = 0;
3857 if (asoc->peers_rwnd > old_rwnd) {
3862 /* First setup for CC stuff */
3863 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3864 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3865 /* Drag along the window_tsn for cwr's */
3866 net->cwr_window_tsn = cumack;
3868 net->prev_cwnd = net->cwnd;
3873 * CMT: Reset CUC and Fast recovery algo variables before
3876 net->new_pseudo_cumack = 0;
3877 net->will_exit_fast_recovery = 0;
3878 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3879 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity: the cum-ack can never reach a TSN we have not yet sent. */
3882 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3883 tp1 = TAILQ_LAST(&asoc->sent_queue,
3884 sctpchunk_listhead);
3885 send_s = tp1->rec.data.tsn + 1;
3887 send_s = asoc->sending_seq;
3889 if (SCTP_TSN_GE(cumack, send_s)) {
3890 struct mbuf *op_err;
3891 char msg[SCTP_DIAG_INFO_LEN];
3895 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3897 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3898 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3899 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3902 asoc->this_sack_highest_gap = cumack;
3903 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3904 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3905 stcb->asoc.overall_error_count,
3907 SCTP_FROM_SCTP_INDATA,
/* A valid SACK is proof of life: clear the association error count. */
3910 stcb->asoc.overall_error_count = 0;
3911 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3912 /* process the new consecutive TSN first */
3913 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3914 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3915 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3916 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3918 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3920 * If it is less than ACKED, it is
3921 * now no-longer in flight. Higher
3922 * values may occur during marking
3924 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3925 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3926 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3927 tp1->whoTo->flight_size,
3929 (uint32_t)(uintptr_t)tp1->whoTo,
3932 sctp_flight_size_decrease(tp1);
3933 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3934 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3937 /* sa_ignore NO_NULL_CHK */
3938 sctp_total_flight_decrease(stcb, tp1);
3940 tp1->whoTo->net_ack += tp1->send_size;
3941 if (tp1->snd_count < 2) {
3943 * True non-retransmited
3946 tp1->whoTo->net_ack2 +=
3949 /* update RTO too? */
/*
 * NOTE(review): the gating condition for this RTO sample
 * (first-transmit + rto_needed check) is on elided lines.
 */
3957 sctp_calculate_rto(stcb,
3959 &tp1->sent_rcv_time,
3960 sctp_align_safe_nocopy,
3961 SCTP_RTT_FROM_DATA);
3964 if (tp1->whoTo->rto_needed == 0) {
3965 tp1->whoTo->rto_needed = 1;
3971 * CMT: CUCv2 algorithm. From the
3972 * cumack'd TSNs, for each TSN being
3973 * acked for the first time, set the
3974 * following variables for the
3975 * corresp destination.
3976 * new_pseudo_cumack will trigger a
3978 * find_(rtx_)pseudo_cumack will
3979 * trigger search for the next
3980 * expected (rtx-)pseudo-cumack.
3982 tp1->whoTo->new_pseudo_cumack = 1;
3983 tp1->whoTo->find_pseudo_cumack = 1;
3984 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3986 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3987 /* sa_ignore NO_NULL_CHK */
3988 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3991 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3992 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3994 if (tp1->rec.data.chunk_was_revoked) {
3995 /* deflate the cwnd */
3996 tp1->whoTo->cwnd -= tp1->book_size;
3997 tp1->rec.data.chunk_was_revoked = 0;
/* Per-stream bookkeeping: this chunk is leaving the stream's queue. */
3999 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4000 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4001 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4004 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4008 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4009 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4010 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4011 asoc->trigger_reset = 1;
/* Fully acked: unlink, release the data mbufs and the chunk itself. */
4013 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4015 /* sa_ignore NO_NULL_CHK */
4016 sctp_free_bufspace(stcb, asoc, tp1, 1);
4017 sctp_m_freem(tp1->data);
4020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4021 sctp_log_sack(asoc->last_acked_seq,
4026 SCTP_LOG_FREE_SENT);
4028 asoc->sent_queue_cnt--;
4029 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/*
 * Freed send-buffer space: wake any writer blocked on so_snd.
 * The __APPLE__/SO_LOCK_TESTING path drops the TCB lock to take
 * the socket lock, so re-check the assoc was not closed meanwhile.
 */
4036 /* sa_ignore NO_NULL_CHK */
4037 if (stcb->sctp_socket) {
4038 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4042 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4043 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4044 /* sa_ignore NO_NULL_CHK */
4045 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4047 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4048 so = SCTP_INP_SO(stcb->sctp_ep);
4049 atomic_add_int(&stcb->asoc.refcnt, 1);
4050 SCTP_TCB_UNLOCK(stcb);
4051 SCTP_SOCKET_LOCK(so, 1);
4052 SCTP_TCB_LOCK(stcb);
4053 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4054 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4055 /* assoc was freed while we were unlocked */
4056 SCTP_SOCKET_UNLOCK(so, 1);
4060 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4061 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4062 SCTP_SOCKET_UNLOCK(so, 1);
4065 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4066 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4070 /* JRS - Use the congestion control given in the CC module */
4071 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4072 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4073 if (net->net_ack2 > 0) {
4075 * Karn's rule applies to clearing error
4076 * count, this is optional.
4078 net->error_count = 0;
4079 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4080 /* addr came good */
4081 net->dest_state |= SCTP_ADDR_REACHABLE;
4082 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4083 0, (void *)net, SCTP_SO_NOT_LOCKED);
4085 if (net == stcb->asoc.primary_destination) {
4086 if (stcb->asoc.alternate) {
4088 * release the alternate,
4091 sctp_free_remote_addr(stcb->asoc.alternate);
4092 stcb->asoc.alternate = NULL;
/* Leave potentially-failed (PF) state; restart heartbeats. */
4095 if (net->dest_state & SCTP_ADDR_PF) {
4096 net->dest_state &= ~SCTP_ADDR_PF;
4097 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4098 stcb->sctp_ep, stcb, net,
4099 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4100 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4101 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4102 /* Done with this net */
4105 /* restore any doubled timers */
4106 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4107 if (net->RTO < stcb->asoc.minrto) {
4108 net->RTO = stcb->asoc.minrto;
4110 if (net->RTO > stcb->asoc.maxrto) {
4111 net->RTO = stcb->asoc.maxrto;
4115 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
/* Commit the new cumulative ack point. */
4117 asoc->last_acked_seq = cumack;
4119 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4120 /* nothing left in-flight */
4121 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4122 net->flight_size = 0;
4123 net->partial_bytes_acked = 0;
4125 asoc->total_flight = 0;
4126 asoc->total_flight_count = 0;
/* Recompute peers_rwnd against the data still in flight. */
4129 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4130 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4131 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4132 /* SWS sender side engages */
4133 asoc->peers_rwnd = 0;
4135 if (asoc->peers_rwnd > old_rwnd) {
4136 win_probe_recovery = 1;
4138 /* Now assure a timer where data is queued at */
4141 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4144 if (win_probe_recovery && (net->window_probe)) {
4145 win_probe_recovered = 1;
4147 * Find first chunk that was used with window probe
4148 * and clear the sent
4150 /* sa_ignore FREED_MEMORY */
4151 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4152 if (tp1->window_probe) {
4153 /* move back to data send queue */
4154 sctp_window_probe_recovery(stcb, asoc, tp1);
4159 if (net->RTO == 0) {
4160 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4162 to_ticks = MSEC_TO_TICKS(net->RTO);
/* Data still in flight to this net: (re)arm its T3-rxt timer. */
4164 if (net->flight_size) {
4166 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4167 sctp_timeout_handler, &net->rxt_timer);
4168 if (net->window_probe) {
4169 net->window_probe = 0;
4172 if (net->window_probe) {
4174 * In window probes we must assure a timer
4175 * is still running there
4177 net->window_probe = 0;
4178 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4179 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4180 sctp_timeout_handler, &net->rxt_timer);
4182 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4183 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4185 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * NOTE(review): the opening of this flight-size consistency check
 * (an if over total_flight and the conditions below) is on elided
 * lines.  On a failed sctp_fs_audit() the flight accounting is
 * rebuilt from the sent queue.
 */
4190 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4191 (asoc->sent_queue_retran_cnt == 0) &&
4192 (win_probe_recovered == 0) &&
4195 * huh, this should not happen unless all packets are
4196 * PR-SCTP and marked to skip of course.
4198 if (sctp_fs_audit(asoc)) {
4199 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4200 net->flight_size = 0;
4202 asoc->total_flight = 0;
4203 asoc->total_flight_count = 0;
4204 asoc->sent_queue_retran_cnt = 0;
4205 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4206 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4207 sctp_flight_size_increase(tp1);
4208 sctp_total_flight_increase(stcb, tp1);
4209 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4210 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4217 /**********************************/
4218 /* Now what about shutdown issues */
4219 /**********************************/
4220 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4221 /* nothing left on sendqueue.. consider done */
4223 if ((asoc->stream_queue_cnt == 1) &&
4224 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4225 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4226 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4227 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4229 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4230 (asoc->stream_queue_cnt == 0)) {
4231 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4232 /* Need to abort here */
4233 struct mbuf *op_err;
4238 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4239 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4240 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4243 struct sctp_nets *netp;
/* Queues drained while SHUTDOWN-PENDING: move to SHUTDOWN-SENT. */
4245 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4246 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4247 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4249 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4250 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4251 sctp_stop_timers_for_shutdown(stcb);
4252 if (asoc->alternate) {
4253 netp = asoc->alternate;
4255 netp = asoc->primary_destination;
4257 sctp_send_shutdown(stcb, netp);
4258 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4259 stcb->sctp_ep, stcb, netp);
4260 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4261 stcb->sctp_ep, stcb, netp);
4263 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4264 (asoc->stream_queue_cnt == 0)) {
4265 struct sctp_nets *netp;
4267 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4270 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4271 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4272 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4273 sctp_stop_timers_for_shutdown(stcb);
4274 if (asoc->alternate) {
4275 netp = asoc->alternate;
4277 netp = asoc->primary_destination;
4279 sctp_send_shutdown_ack(stcb, netp);
4280 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4281 stcb->sctp_ep, stcb, netp);
4284 /*********************************************/
4285 /* Here we perform PR-SCTP procedures */
4287 /*********************************************/
4288 /* C1. update advancedPeerAckPoint */
4289 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4290 asoc->advanced_peer_ack_point = cumack;
4292 /* PR-Sctp issues need to be addressed too */
4293 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4294 struct sctp_tmit_chunk *lchk;
4295 uint32_t old_adv_peer_ack_point;
4297 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4298 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4299 /* C3. See if we need to send a Fwd-TSN */
4300 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4302 * ISSUE with ECN, see FWD-TSN processing.
4304 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4305 send_forward_tsn(stcb, asoc);
4307 /* try to FR fwd-tsn's that get lost too */
4308 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4309 send_forward_tsn(stcb, asoc);
4314 /* Assure a timer is up */
4315 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4316 stcb->sctp_ep, stcb, lchk->whoTo);
4319 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4320 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4322 stcb->asoc.peers_rwnd,
4323 stcb->asoc.total_flight,
4324 stcb->asoc.total_output_queue_size);
4329 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4330 struct sctp_tcb *stcb,
4331 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4332 int *abort_now, uint8_t flags,
4333 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4335 struct sctp_association *asoc;
4336 struct sctp_tmit_chunk *tp1, *tp2;
4337 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4338 uint16_t wake_him = 0;
4339 uint32_t send_s = 0;
4341 int accum_moved = 0;
4342 int will_exit_fast_recovery = 0;
4343 uint32_t a_rwnd, old_rwnd;
4344 int win_probe_recovery = 0;
4345 int win_probe_recovered = 0;
4346 struct sctp_nets *net = NULL;
4349 uint8_t reneged_all = 0;
4350 uint8_t cmt_dac_flag;
4353 * we take any chance we can to service our queues since we cannot
4354 * get awoken when the socket is read from :<
4357 * Now perform the actual SACK handling: 1) Verify that it is not an
4358 * old sack, if so discard. 2) If there is nothing left in the send
4359 * queue (cum-ack is equal to last acked) then you have a duplicate
4360 * too, update any rwnd change and verify no timers are running.
4361 * then return. 3) Process any new consequtive data i.e. cum-ack
4362 * moved process these first and note that it moved. 4) Process any
4363 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4364 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4365 * sync up flightsizes and things, stop all timers and also check
4366 * for shutdown_pending state. If so then go ahead and send off the
4367 * shutdown. If in shutdown recv, send off the shutdown-ack and
4368 * start that timer, Ret. 9) Strike any non-acked things and do FR
4369 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4370 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4371 * if in shutdown_recv state.
4373 SCTP_TCB_LOCK_ASSERT(stcb);
4375 this_sack_lowest_newack = 0;
4376 SCTP_STAT_INCR(sctps_slowpath_sack);
4378 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4379 #ifdef SCTP_ASOCLOG_OF_TSNS
4380 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4381 stcb->asoc.cumack_log_at++;
4382 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4383 stcb->asoc.cumack_log_at = 0;
4388 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4389 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4390 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4392 old_rwnd = stcb->asoc.peers_rwnd;
4393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4394 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4395 stcb->asoc.overall_error_count,
4397 SCTP_FROM_SCTP_INDATA,
4400 stcb->asoc.overall_error_count = 0;
4402 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4403 sctp_log_sack(asoc->last_acked_seq,
4410 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4412 uint32_t *dupdata, dblock;
4414 for (i = 0; i < num_dup; i++) {
4415 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4416 sizeof(uint32_t), (uint8_t *)&dblock);
4417 if (dupdata == NULL) {
4420 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4424 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4425 tp1 = TAILQ_LAST(&asoc->sent_queue,
4426 sctpchunk_listhead);
4427 send_s = tp1->rec.data.tsn + 1;
4430 send_s = asoc->sending_seq;
4432 if (SCTP_TSN_GE(cum_ack, send_s)) {
4433 struct mbuf *op_err;
4434 char msg[SCTP_DIAG_INFO_LEN];
4437 * no way, we have not even sent this TSN out yet. Peer is
4438 * hopelessly messed up with us.
4440 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4443 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4444 tp1->rec.data.tsn, (void *)tp1);
4449 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4451 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4452 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4453 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4456 /**********************/
4457 /* 1) check the range */
4458 /**********************/
4459 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4460 /* acking something behind */
4463 /* update the Rwnd of the peer */
4464 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4465 TAILQ_EMPTY(&asoc->send_queue) &&
4466 (asoc->stream_queue_cnt == 0)) {
4467 /* nothing left on send/sent and strmq */
4468 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4469 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4470 asoc->peers_rwnd, 0, 0, a_rwnd);
4472 asoc->peers_rwnd = a_rwnd;
4473 if (asoc->sent_queue_retran_cnt) {
4474 asoc->sent_queue_retran_cnt = 0;
4476 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4477 /* SWS sender side engages */
4478 asoc->peers_rwnd = 0;
4480 /* stop any timers */
4481 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4482 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4483 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4484 net->partial_bytes_acked = 0;
4485 net->flight_size = 0;
4487 asoc->total_flight = 0;
4488 asoc->total_flight_count = 0;
4492 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4493 * things. The total byte count acked is tracked in netAckSz AND
4494 * netAck2 is used to track the total bytes acked that are un-
4495 * amibguious and were never retransmitted. We track these on a per
4496 * destination address basis.
4498 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4499 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4500 /* Drag along the window_tsn for cwr's */
4501 net->cwr_window_tsn = cum_ack;
4503 net->prev_cwnd = net->cwnd;
4508 * CMT: Reset CUC and Fast recovery algo variables before
4511 net->new_pseudo_cumack = 0;
4512 net->will_exit_fast_recovery = 0;
4513 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4514 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4517 /* process the new consecutive TSN first */
4518 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4519 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4520 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4522 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4524 * If it is less than ACKED, it is
4525 * now no-longer in flight. Higher
4526 * values may occur during marking
4528 if ((tp1->whoTo->dest_state &
4529 SCTP_ADDR_UNCONFIRMED) &&
4530 (tp1->snd_count < 2)) {
4532 * If there was no retran
4533 * and the address is
4534 * un-confirmed and we sent
4536 * sacked.. its confirmed,
4539 tp1->whoTo->dest_state &=
4540 ~SCTP_ADDR_UNCONFIRMED;
4542 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4544 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4545 tp1->whoTo->flight_size,
4547 (uint32_t)(uintptr_t)tp1->whoTo,
4550 sctp_flight_size_decrease(tp1);
4551 sctp_total_flight_decrease(stcb, tp1);
4552 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4553 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4557 tp1->whoTo->net_ack += tp1->send_size;
4559 /* CMT SFR and DAC algos */
4560 this_sack_lowest_newack = tp1->rec.data.tsn;
4561 tp1->whoTo->saw_newack = 1;
4563 if (tp1->snd_count < 2) {
4565 * True non-retransmited
4568 tp1->whoTo->net_ack2 +=
4571 /* update RTO too? */
4575 sctp_calculate_rto(stcb,
4577 &tp1->sent_rcv_time,
4578 sctp_align_safe_nocopy,
4579 SCTP_RTT_FROM_DATA);
4582 if (tp1->whoTo->rto_needed == 0) {
4583 tp1->whoTo->rto_needed = 1;
4589 * CMT: CUCv2 algorithm. From the
4590 * cumack'd TSNs, for each TSN being
4591 * acked for the first time, set the
4592 * following variables for the
4593 * corresp destination.
4594 * new_pseudo_cumack will trigger a
4596 * find_(rtx_)pseudo_cumack will
4597 * trigger search for the next
4598 * expected (rtx-)pseudo-cumack.
4600 tp1->whoTo->new_pseudo_cumack = 1;
4601 tp1->whoTo->find_pseudo_cumack = 1;
4602 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4605 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4606 sctp_log_sack(asoc->last_acked_seq,
4611 SCTP_LOG_TSN_ACKED);
4613 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4614 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4617 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4618 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4619 #ifdef SCTP_AUDITING_ENABLED
4620 sctp_audit_log(0xB3,
4621 (asoc->sent_queue_retran_cnt & 0x000000ff));
4624 if (tp1->rec.data.chunk_was_revoked) {
4625 /* deflate the cwnd */
4626 tp1->whoTo->cwnd -= tp1->book_size;
4627 tp1->rec.data.chunk_was_revoked = 0;
4629 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4630 tp1->sent = SCTP_DATAGRAM_ACKED;
4637 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4638 /* always set this up to cum-ack */
4639 asoc->this_sack_highest_gap = last_tsn;
4641 if ((num_seg > 0) || (num_nr_seg > 0)) {
4644 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4645 * to be greater than the cumack. Also reset saw_newack to 0
4648 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4649 net->saw_newack = 0;
4650 net->this_sack_highest_newack = last_tsn;
4654 * thisSackHighestGap will increase while handling NEW
4655 * segments this_sack_highest_newack will increase while
4656 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4657 * used for CMT DAC algo. saw_newack will also change.
4659 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4660 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4661 num_seg, num_nr_seg, &rto_ok)) {
4665 * validate the biggest_tsn_acked in the gap acks if strict
4666 * adherence is wanted.
4668 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4670 * peer is either confused or we are under attack.
4673 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4674 biggest_tsn_acked, send_s);
4678 /*******************************************/
4679 /* cancel ALL T3-send timer if accum moved */
4680 /*******************************************/
4681 if (asoc->sctp_cmt_on_off > 0) {
4682 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4683 if (net->new_pseudo_cumack)
4684 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4686 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4691 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4692 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4693 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4697 /********************************************/
4698 /* drop the acked chunks from the sentqueue */
4699 /********************************************/
4700 asoc->last_acked_seq = cum_ack;
4702 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4703 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4706 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4707 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4708 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4711 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4715 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4716 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4717 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4718 asoc->trigger_reset = 1;
4720 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4721 if (PR_SCTP_ENABLED(tp1->flags)) {
4722 if (asoc->pr_sctp_cnt != 0)
4723 asoc->pr_sctp_cnt--;
4725 asoc->sent_queue_cnt--;
4727 /* sa_ignore NO_NULL_CHK */
4728 sctp_free_bufspace(stcb, asoc, tp1, 1);
4729 sctp_m_freem(tp1->data);
4731 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4732 asoc->sent_queue_cnt_removeable--;
4735 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4736 sctp_log_sack(asoc->last_acked_seq,
4741 SCTP_LOG_FREE_SENT);
4743 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4746 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4748 panic("Warning flight size is positive and should be 0");
4750 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4751 asoc->total_flight);
4753 asoc->total_flight = 0;
4755 /* sa_ignore NO_NULL_CHK */
4756 if ((wake_him) && (stcb->sctp_socket)) {
4757 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4761 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4762 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4763 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4765 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4766 so = SCTP_INP_SO(stcb->sctp_ep);
4767 atomic_add_int(&stcb->asoc.refcnt, 1);
4768 SCTP_TCB_UNLOCK(stcb);
4769 SCTP_SOCKET_LOCK(so, 1);
4770 SCTP_TCB_LOCK(stcb);
4771 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4772 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4773 /* assoc was freed while we were unlocked */
4774 SCTP_SOCKET_UNLOCK(so, 1);
4778 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4779 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4780 SCTP_SOCKET_UNLOCK(so, 1);
4783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4784 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4788 if (asoc->fast_retran_loss_recovery && accum_moved) {
4789 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4790 /* Setup so we will exit RFC2582 fast recovery */
4791 will_exit_fast_recovery = 1;
4795 * Check for revoked fragments:
4797 * if Previous sack - Had no frags then we can't have any revoked if
4798 * Previous sack - Had frag's then - If we now have frags aka
4799 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4800 * some of them. else - The peer revoked all ACKED fragments, since
4801 * we had some before and now we have NONE.
4805 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4806 asoc->saw_sack_with_frags = 1;
4807 } else if (asoc->saw_sack_with_frags) {
4808 int cnt_revoked = 0;
4810 /* Peer revoked all dg's marked or acked */
4811 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4812 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4813 tp1->sent = SCTP_DATAGRAM_SENT;
4814 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4815 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4816 tp1->whoTo->flight_size,
4818 (uint32_t)(uintptr_t)tp1->whoTo,
4821 sctp_flight_size_increase(tp1);
4822 sctp_total_flight_increase(stcb, tp1);
4823 tp1->rec.data.chunk_was_revoked = 1;
4825 * To ensure that this increase in
4826 * flightsize, which is artificial, does not
4827 * throttle the sender, we also increase the
4828 * cwnd artificially.
4830 tp1->whoTo->cwnd += tp1->book_size;
4837 asoc->saw_sack_with_frags = 0;
4840 asoc->saw_sack_with_nr_frags = 1;
4842 asoc->saw_sack_with_nr_frags = 0;
4844 /* JRS - Use the congestion control given in the CC module */
4845 if (ecne_seen == 0) {
4846 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4847 if (net->net_ack2 > 0) {
4849 * Karn's rule applies to clearing error
4850 * count, this is optional.
4852 net->error_count = 0;
4853 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4854 /* addr came good */
4855 net->dest_state |= SCTP_ADDR_REACHABLE;
4856 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4857 0, (void *)net, SCTP_SO_NOT_LOCKED);
4859 if (net == stcb->asoc.primary_destination) {
4860 if (stcb->asoc.alternate) {
4862 * release the alternate,
4865 sctp_free_remote_addr(stcb->asoc.alternate);
4866 stcb->asoc.alternate = NULL;
4869 if (net->dest_state & SCTP_ADDR_PF) {
4870 net->dest_state &= ~SCTP_ADDR_PF;
4871 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4872 stcb->sctp_ep, stcb, net,
4873 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4874 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4875 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4876 /* Done with this net */
4879 /* restore any doubled timers */
4880 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4881 if (net->RTO < stcb->asoc.minrto) {
4882 net->RTO = stcb->asoc.minrto;
4884 if (net->RTO > stcb->asoc.maxrto) {
4885 net->RTO = stcb->asoc.maxrto;
4889 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4891 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4892 /* nothing left in-flight */
4893 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4894 /* stop all timers */
4895 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4897 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4898 net->flight_size = 0;
4899 net->partial_bytes_acked = 0;
4901 asoc->total_flight = 0;
4902 asoc->total_flight_count = 0;
4904 /**********************************/
4905 /* Now what about shutdown issues */
4906 /**********************************/
4907 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4908 /* nothing left on sendqueue.. consider done */
4909 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4910 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4911 asoc->peers_rwnd, 0, 0, a_rwnd);
4913 asoc->peers_rwnd = a_rwnd;
4914 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4915 /* SWS sender side engages */
4916 asoc->peers_rwnd = 0;
4919 if ((asoc->stream_queue_cnt == 1) &&
4920 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4921 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4922 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4923 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4925 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4926 (asoc->stream_queue_cnt == 0)) {
4927 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4928 /* Need to abort here */
4929 struct mbuf *op_err;
4934 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4935 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4936 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4939 struct sctp_nets *netp;
4941 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4942 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4943 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4945 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4946 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4947 sctp_stop_timers_for_shutdown(stcb);
4948 if (asoc->alternate) {
4949 netp = asoc->alternate;
4951 netp = asoc->primary_destination;
4953 sctp_send_shutdown(stcb, netp);
4954 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4955 stcb->sctp_ep, stcb, netp);
4956 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4957 stcb->sctp_ep, stcb, netp);
4960 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4961 (asoc->stream_queue_cnt == 0)) {
4962 struct sctp_nets *netp;
4964 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4967 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4968 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4969 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4970 sctp_stop_timers_for_shutdown(stcb);
4971 if (asoc->alternate) {
4972 netp = asoc->alternate;
4974 netp = asoc->primary_destination;
4976 sctp_send_shutdown_ack(stcb, netp);
4977 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4978 stcb->sctp_ep, stcb, netp);
4983 * Now here we are going to recycle net_ack for a different use...
4986 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4991 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4992 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4993 * automatically ensure that.
4995 if ((asoc->sctp_cmt_on_off > 0) &&
4996 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4997 (cmt_dac_flag == 0)) {
4998 this_sack_lowest_newack = cum_ack;
5000 if ((num_seg > 0) || (num_nr_seg > 0)) {
5001 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5002 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5004 /* JRS - Use the congestion control given in the CC module */
5005 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5007 /* Now are we exiting loss recovery ? */
5008 if (will_exit_fast_recovery) {
5009 /* Ok, we must exit fast recovery */
5010 asoc->fast_retran_loss_recovery = 0;
5012 if ((asoc->sat_t3_loss_recovery) &&
5013 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5014 /* end satellite t3 loss recovery */
5015 asoc->sat_t3_loss_recovery = 0;
5020 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5021 if (net->will_exit_fast_recovery) {
5022 /* Ok, we must exit fast recovery */
5023 net->fast_retran_loss_recovery = 0;
5027 /* Adjust and set the new rwnd value */
5028 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5029 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5030 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5032 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5033 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5034 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5035 /* SWS sender side engages */
5036 asoc->peers_rwnd = 0;
5038 if (asoc->peers_rwnd > old_rwnd) {
5039 win_probe_recovery = 1;
5042 * Now we must setup so we have a timer up for anyone with
5048 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5049 if (win_probe_recovery && (net->window_probe)) {
5050 win_probe_recovered = 1;
5052 * Find first chunk that was used with
5053 * window probe and clear the event. Put
5054 * it back into the send queue as if has
5057 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5058 if (tp1->window_probe) {
5059 sctp_window_probe_recovery(stcb, asoc, tp1);
5064 if (net->flight_size) {
5066 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5067 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5068 stcb->sctp_ep, stcb, net);
5070 if (net->window_probe) {
5071 net->window_probe = 0;
5074 if (net->window_probe) {
5076 * In window probes we must assure a timer
5077 * is still running there
5079 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5080 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5081 stcb->sctp_ep, stcb, net);
5084 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5085 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5087 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5092 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5093 (asoc->sent_queue_retran_cnt == 0) &&
5094 (win_probe_recovered == 0) &&
5097 * huh, this should not happen unless all packets are
5098 * PR-SCTP and marked to skip of course.
5100 if (sctp_fs_audit(asoc)) {
5101 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5102 net->flight_size = 0;
5104 asoc->total_flight = 0;
5105 asoc->total_flight_count = 0;
5106 asoc->sent_queue_retran_cnt = 0;
5107 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5108 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5109 sctp_flight_size_increase(tp1);
5110 sctp_total_flight_increase(stcb, tp1);
5111 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5112 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5119 /*********************************************/
5120 /* Here we perform PR-SCTP procedures */
5122 /*********************************************/
5123 /* C1. update advancedPeerAckPoint */
5124 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5125 asoc->advanced_peer_ack_point = cum_ack;
5127 /* C2. try to further move advancedPeerAckPoint ahead */
5128 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5129 struct sctp_tmit_chunk *lchk;
5130 uint32_t old_adv_peer_ack_point;
5132 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5133 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5134 /* C3. See if we need to send a Fwd-TSN */
5135 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5137 * ISSUE with ECN, see FWD-TSN processing.
5139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5140 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5141 0xee, cum_ack, asoc->advanced_peer_ack_point,
5142 old_adv_peer_ack_point);
5144 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5145 send_forward_tsn(stcb, asoc);
5147 /* try to FR fwd-tsn's that get lost too */
5148 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5149 send_forward_tsn(stcb, asoc);
5154 /* Assure a timer is up */
5155 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5156 stcb->sctp_ep, stcb, lchk->whoTo);
5159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5160 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5162 stcb->asoc.peers_rwnd,
5163 stcb->asoc.total_flight,
5164 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a received SHUTDOWN chunk.
 * A SHUTDOWN carries only a cum-ack (no gap reports), so it is handled
 * by handing the value to the express SACK path.
 *
 * @stcb       association control block
 * @cp         received SHUTDOWN chunk; cumulative_tsn_ack is in network order
 * @abort_flag set by the express-SACK handler if the association must abort
 */
5169 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5172 uint32_t cum_ack, a_rwnd;
5174 cum_ack = ntohl(cp->cumulative_tsn_ack);
5175 /* Arrange so a_rwnd does NOT change */
/*
 * Synthesize a_rwnd as the current peer rwnd plus bytes in flight:
 * once the newly acked data is removed from flight by the SACK
 * handler, the peer's advertised window works out unchanged.
 */
5176 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5178 /* Now call the express sack handling */
5179 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * sctp_kick_prsctp_reorder_queue:
 * After PR-SCTP (FORWARD-TSN) processing has advanced what counts as
 * delivered on inbound stream 'strmin', sweep its ordered queue in two
 * passes:
 *   pass 1 - every queued message with MID at or below
 *            last_mid_delivered is pushed to the socket read queue
 *            (complete messages) or flagged for a reassembly check
 *            (messages whose first fragment was seen);
 *   pass 2 - resume normal in-sequence delivery for consecutive MIDs
 *            that have now become deliverable.
 * NOTE(review): SCTP_READ_LOCK_HELD is passed throughout, so the caller
 * presumably holds the INP read lock — confirm at call sites.
 */
5183 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5184 struct sctp_stream_in *strmin)
5186 struct sctp_queued_to_read *ctl, *nctl;
5187 struct sctp_association *asoc;
5189 int need_reasm_check = 0;
5192 mid = strmin->last_mid_delivered;
5194 * First deliver anything prior to and including the stream no that
/* Pass 1: flush everything at or below the current delivery point. */
5197 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5198 if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
5199 /* this is deliverable now */
5200 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unfragmented message: unlink it from whichever stream queue holds it. */
5201 if (ctl->on_strm_q) {
5202 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5203 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5204 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5205 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
/* Any other on_strm_q value indicates corrupted state. */
5208 panic("strmin: %p ctl: %p unknown %d",
5209 strmin, ctl, ctl->on_strm_q);
5214 /* subtract pending on streams */
5215 asoc->size_on_all_streams -= ctl->length;
5216 sctp_ucount_decr(asoc->cnt_on_all_streams);
5217 /* deliver it to at least the delivery-q */
5218 if (stcb->sctp_socket) {
5219 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5220 sctp_add_to_readq(stcb->sctp_ep, stcb,
5222 &stcb->sctp_socket->so_rcv,
5223 1, SCTP_READ_LOCK_HELD,
5224 SCTP_SO_NOT_LOCKED);
5227 /* Its a fragmented message */
5228 if (ctl->first_frag_seen) {
5230 * Make it so this is next to
5231 * deliver, we restore later
/* Temporarily back the delivery point up to just before this
 * partial message so the reassembly check will pick it up. */
5233 strmin->last_mid_delivered = ctl->mid - 1;
5234 need_reasm_check = 1;
5239 /* no more delivery now. */
/* A partial message was made "next to deliver": run the reassembly
 * check, then restore the delivery point unless it advanced past the
 * saved value. */
5243 if (need_reasm_check) {
5246 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5247 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5248 /* Restore the next to deliver unless we are ahead */
5249 strmin->last_mid_delivered = mid;
5252 /* Left the front Partial one on */
5255 need_reasm_check = 0;
5258 * now we must deliver things in queue the normal way if any are
/* Pass 2: deliver strictly consecutive MIDs starting just past the
 * delivery point. */
5261 mid = strmin->last_mid_delivered + 1;
5262 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5263 if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
5264 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5265 /* this is deliverable now */
5266 if (ctl->on_strm_q) {
5267 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5268 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5269 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5270 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5273 panic("strmin: %p ctl: %p unknown %d",
5274 strmin, ctl, ctl->on_strm_q);
5279 /* subtract pending on streams */
5280 asoc->size_on_all_streams -= ctl->length;
5281 sctp_ucount_decr(asoc->cnt_on_all_streams);
5282 /* deliver it to at least the delivery-q */
5283 strmin->last_mid_delivered = ctl->mid;
5284 if (stcb->sctp_socket) {
5285 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5286 sctp_add_to_readq(stcb->sctp_ep, stcb,
5288 &stcb->sctp_socket->so_rcv, 1,
5289 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance to the next expected MID and keep scanning. */
5292 mid = strmin->last_mid_delivered + 1;
5294 /* Its a fragmented message */
5295 if (ctl->first_frag_seen) {
5297 * Make it so this is next to
5300 strmin->last_mid_delivered = ctl->mid - 1;
5301 need_reasm_check = 1;
/* Pass 2 may again have exposed a partial message at the front. */
5309 if (need_reasm_check) {
5310 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * sctp_flush_reassm_for_str_seq:
 * Drop the reassembly state for one (stream, MID) entry when a
 * FORWARD-TSN renders its message skippable: free the queued
 * fragments, adjust the reassembly-queue accounting, and release the
 * control block if it is not sitting on the socket read queue.
 * For pre-I-DATA unordered traffic (idata_supported == 0, ordered == 0),
 * only fragments with TSN at or below 'cumtsn' are purged.
 *
 * @stcb    association control block
 * @asoc    the association (asoc->strmin[stream] is the inbound stream)
 * @stream  inbound stream id (sid)
 * @mid     message id / SSN identifying the reassembly entry
 * @ordered nonzero for ordered delivery
 * @cumtsn  new cumulative TSN from the FORWARD-TSN chunk
 */
5317 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5318 struct sctp_association *asoc,
5319 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5321 struct sctp_queued_to_read *control;
5322 struct sctp_stream_in *strm;
5323 struct sctp_tmit_chunk *chk, *nchk;
5324 int cnt_removed = 0;
5327 * For now large messages held on the stream reasm that are complete
5328 * will be tossed too. We could in theory do more work to spin
5329 * through and stop after dumping one msg aka seeing the start of a
5330 * new msg at the head, and call the delivery function... to see if
5331 * it can be delivered... But for now we just dump everything on the
5334 strm = &asoc->strmin[stream];
5335 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
/* No reassembly entry for this (stream, MID): nothing to flush. */
5336 if (control == NULL) {
/* Old-style unordered data wholly ahead of the new cum-ack is not
 * affected by this FORWARD-TSN. */
5340 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5343 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5344 /* Purge hanging chunks */
/* Pre-I-DATA unordered case: stop purging once a fragment's TSN
 * exceeds the new cum-ack. */
5345 if (!asoc->idata_supported && (ordered == 0)) {
5346 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
/* Unlink the fragment, fix reassembly accounting, free its data. */
5351 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5352 asoc->size_on_reasm_queue -= chk->send_size;
5353 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5355 sctp_m_freem(chk->data);
5358 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Fragments remain after the purge: this must be old unordered data.
 * Reset the control block and, if the surviving head fragment starts
 * a message, restart reassembly from it. */
5360 if (!TAILQ_EMPTY(&control->reasm)) {
5361 /* This has to be old data, unordered */
5362 if (control->data) {
5363 sctp_m_freem(control->data);
5364 control->data = NULL;
5366 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5367 chk = TAILQ_FIRST(&control->reasm);
5368 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5369 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5370 sctp_add_chk_to_control(control, strm, stcb, asoc,
5371 chk, SCTP_READ_LOCK_HELD);
5373 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Everything was purged: unlink the control from its stream queue. */
5376 if (control->on_strm_q == SCTP_ON_ORDERED) {
5377 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5378 control->on_strm_q = 0;
5379 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5380 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5381 control->on_strm_q = 0;
5383 } else if (control->on_strm_q) {
5384 panic("strm: %p ctl: %p unknown %d",
5385 strm, control, control->on_strm_q);
5388 control->on_strm_q = 0;
/* Only free the control block if the read queue does not still
 * reference it. */
5389 if (control->on_read_q == 0) {
5390 sctp_free_remote_addr(control->whoFrom);
5391 if (control->data) {
5392 sctp_m_freem(control->data);
5393 control->data = NULL;
5395 sctp_free_a_readq(stcb, control);
5400 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5401 struct sctp_forward_tsn_chunk *fwd,
5402 int *abort_flag, struct mbuf *m, int offset)
5404 /* The pr-sctp fwd tsn */
5406 * here we will perform all the data receiver side steps for
5407 * processing FwdTSN, as required in by pr-sctp draft:
5409 * Assume we get FwdTSN(x):
5411 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5412 * + others we have 3) examine and update re-ordering queue on
5413 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5414 * report where we are.
5416 struct sctp_association *asoc;
5417 uint32_t new_cum_tsn, gap;
5418 unsigned int i, fwd_sz, m_size;
5420 struct sctp_stream_in *strm;
5421 struct sctp_queued_to_read *ctl, *sv;
5424 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5425 SCTPDBG(SCTP_DEBUG_INDATA1,
5426 "Bad size too small/big fwd-tsn\n");
5429 m_size = (stcb->asoc.mapping_array_size << 3);
5430 /*************************************************************/
5431 /* 1. Here we update local cumTSN and shift the bitmap array */
5432 /*************************************************************/
5433 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5435 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5436 /* Already got there ... */
5440 * now we know the new TSN is more advanced, let's find the actual
5443 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5444 asoc->cumulative_tsn = new_cum_tsn;
5445 if (gap >= m_size) {
5446 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5447 struct mbuf *op_err;
5448 char msg[SCTP_DIAG_INFO_LEN];
5451 * out of range (of single byte chunks in the rwnd I
5452 * give out). This must be an attacker.
5455 snprintf(msg, sizeof(msg),
5456 "New cum ack %8.8x too high, highest TSN %8.8x",
5457 new_cum_tsn, asoc->highest_tsn_inside_map);
5458 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5459 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5460 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5463 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5465 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5466 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5467 asoc->highest_tsn_inside_map = new_cum_tsn;
5469 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5470 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5472 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5473 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5476 SCTP_TCB_LOCK_ASSERT(stcb);
5477 for (i = 0; i <= gap; i++) {
5478 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5479 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5480 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5481 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5482 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5487 /*************************************************************/
5488 /* 2. Clear up re-assembly queue */
5489 /*************************************************************/
5491 /* This is now done as part of clearing up the stream/seq */
5492 if (asoc->idata_supported == 0) {
5495 /* Flush all the un-ordered data based on cum-tsn */
5496 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5497 for (sid = 0; sid < asoc->streamincnt; sid++) {
5498 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5500 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5502 /*******************************************************/
5503 /* 3. Update the PR-stream re-ordering queues and fix */
5504 /* delivery issues as needed. */
5505 /*******************************************************/
5506 fwd_sz -= sizeof(*fwd);
5509 unsigned int num_str;
5510 uint32_t mid, cur_mid;
5512 uint16_t ordered, flags;
5513 struct sctp_strseq *stseq, strseqbuf;
5514 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5516 offset += sizeof(*fwd);
5518 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5519 if (asoc->idata_supported) {
5520 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5522 num_str = fwd_sz / sizeof(struct sctp_strseq);
5524 for (i = 0; i < num_str; i++) {
5525 if (asoc->idata_supported) {
5526 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5527 sizeof(struct sctp_strseq_mid),
5528 (uint8_t *)&strseqbuf_m);
5529 offset += sizeof(struct sctp_strseq_mid);
5530 if (stseq_m == NULL) {
5533 sid = ntohs(stseq_m->sid);
5534 mid = ntohl(stseq_m->mid);
5535 flags = ntohs(stseq_m->flags);
5536 if (flags & PR_SCTP_UNORDERED_FLAG) {
5542 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5543 sizeof(struct sctp_strseq),
5544 (uint8_t *)&strseqbuf);
5545 offset += sizeof(struct sctp_strseq);
5546 if (stseq == NULL) {
5549 sid = ntohs(stseq->sid);
5550 mid = (uint32_t)ntohs(stseq->ssn);
5558 * Ok we now look for the stream/seq on the read
5559 * queue where its not all delivered. If we find it
5560 * we transmute the read entry into a PDI_ABORTED.
5562 if (sid >= asoc->streamincnt) {
5563 /* screwed up streams, stop! */
5566 if ((asoc->str_of_pdapi == sid) &&
5567 (asoc->ssn_of_pdapi == mid)) {
5569 * If this is the one we were partially
5570 * delivering now then we no longer are.
5571 * Note this will change with the reassembly
5574 asoc->fragmented_delivery_inprogress = 0;
5576 strm = &asoc->strmin[sid];
5577 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5578 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5580 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5581 if ((ctl->sinfo_stream == sid) &&
5582 (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
5583 str_seq = (sid << 16) | (0x0000ffff & mid);
5584 ctl->pdapi_aborted = 1;
5585 sv = stcb->asoc.control_pdapi;
5587 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5588 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5589 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5590 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5592 } else if (ctl->on_strm_q) {
5593 panic("strm: %p ctl: %p unknown %d",
5594 strm, ctl, ctl->on_strm_q);
5598 stcb->asoc.control_pdapi = ctl;
5599 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5601 SCTP_PARTIAL_DELIVERY_ABORTED,
5603 SCTP_SO_NOT_LOCKED);
5604 stcb->asoc.control_pdapi = sv;
5606 } else if ((ctl->sinfo_stream == sid) &&
5607 SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
5608 /* We are past our victim SSN */
5612 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5613 /* Update the sequence number */
5614 strm->last_mid_delivered = mid;
5616 /* now kick the stream the new way */
5617 /* sa_ignore NO_NULL_CHK */
5618 sctp_kick_prsctp_reorder_queue(stcb, strm);
5620 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5623 * Now slide thing forward.
5625 sctp_slide_mapping_arrays(stcb);