2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a sack into the chunk queue (if I have data to
58 * send that is and will be sending it .. for bundling.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: merge chunk `chk` into the read-queue entry
 * `control` (defined later in this file).
 * NOTE(review): the extraction dropped the storage-class/return-type
 * line of this prototype; in upstream it is a static function.
 */
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int lock_held);
/*
 * Recompute the association's advertised receive window and cache it
 * in asoc->my_rwnd.  Pure delegation to sctp_calc_rwnd().
 * NOTE(review): braces/return-type lines were lost in extraction.
 */
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc)
56 /* Calculate what the rwnd would be */
/*
 * Compute the receive window to advertise for this association:
 * full socket-buffer limit when nothing is queued, otherwise the
 * actual socket space minus data (and per-chunk MSIZE overhead)
 * still held on the reassembly and in-stream queues.
 * NOTE(review): several lines (braces, `else`, declaration of `calc`,
 * returns) were dropped by extraction — do not treat this body as
 * compilable as-is.
 */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone as put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
90 if (stcb->sctp_socket == NULL) {
/* Sanity: a zero chunk count must imply zero byte count. */
94 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->cnt_on_reasm_queue == 0 &&
100 asoc->cnt_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
108 * take out what has NOT been put on socket queue and we yet hold
/* MSIZE per chunk accounts for mbuf bookkeeping overhead. */
111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 asoc->cnt_on_reasm_queue * MSIZE));
113 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 asoc->cnt_on_all_streams * MSIZE));
120 /* what is the overhead of all these rwnd's */
121 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
123 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 * even it is 0. SWS engaged
126 if (calc < stcb->asoc.my_rwnd_control_len) {
84 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a sctp_queued_to_read entry from the fields
 * of an arriving DATA/I-DATA chunk.  Takes a reference on `net`
 * (dropped when the entry is freed).  Returns NULL on allocation
 * failure.
 * NOTE(review): the parameter list is cut short by extraction — the
 * mbuf parameter `dm` assigned below is declared on a dropped line.
 */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139 struct sctp_nets *net,
140 uint32_t tsn, uint32_t ppid,
141 uint32_t context, uint16_t sid,
142 uint32_t mid, uint8_t flags,
145 struct sctp_queued_to_read *read_queue_e = NULL;
147 sctp_alloc_a_readq(stcb, read_queue_e);
148 if (read_queue_e == NULL) {
151 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 read_queue_e->sinfo_stream = sid;
/* sinfo_flags carries chunk flags in its upper byte. */
153 read_queue_e->sinfo_flags = (flags << 8);
154 read_queue_e->sinfo_ppid = ppid;
155 read_queue_e->sinfo_context = context;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->mid = mid;
/* 0xffffffff == "no fragment seen yet" sentinel for FSN tracking. */
160 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 TAILQ_INIT(&read_queue_e->reasm);
162 read_queue_e->whoFrom = net;
163 atomic_add_int(&net->ref_count, 1);
164 read_queue_e->data = dm;
165 read_queue_e->stcb = stcb;
166 read_queue_e->port_from = stcb->rport;
168 return (read_queue_e);
/*
 * Build the ancillary-data (cmsg) mbuf for a received message:
 * SCTP_RCVINFO, SCTP_NXTINFO and/or SCTP_SNDRCV/SCTP_EXTRCV,
 * depending on which socket features are enabled on `inp`.
 * Returns NULL when no ancillary data is wanted (or, presumably, on
 * mbuf allocation failure — the failure branch was dropped by
 * extraction).
 * NOTE(review): declarations of `ret`, `cmh` and `len`, plus several
 * braces/else lines, are missing from this extracted view.
 */
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
174 struct sctp_extrcvinfo *seinfo;
175 struct sctp_sndrcvinfo *outinfo;
176 struct sctp_rcvinfo *rcvinfo;
177 struct sctp_nxtinfo *nxtinfo;
184 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 /* user does not want any ancillary data */
/* First pass: size the buffer using CMSG_SPACE for each cmsg. */
192 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
195 seinfo = (struct sctp_extrcvinfo *)sinfo;
196 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
199 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
206 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
209 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
215 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
220 SCTP_BUF_LEN(ret) = 0;
222 /* We need a CMSG header followed by the struct */
223 cmh = mtod(ret, struct cmsghdr *);
225 * Make sure that there is no un-initialized padding between the
226 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: fill each cmsg and advance cmh by CMSG_SPACE. */
229 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 cmh->cmsg_level = IPPROTO_SCTP;
231 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 cmh->cmsg_type = SCTP_RCVINFO;
233 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 rcvinfo->rcv_context = sinfo->sinfo_context;
241 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* NXTINFO block: guard `if` was dropped by extraction. */
246 cmh->cmsg_level = IPPROTO_SCTP;
247 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 cmh->cmsg_type = SCTP_NXTINFO;
249 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 nxtinfo->nxt_flags = 0;
252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 nxtinfo->nxt_flags |= SCTP_UNORDERED;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 nxtinfo->nxt_flags |= SCTP_COMPLETE;
261 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
267 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 cmh->cmsg_level = IPPROTO_SCTP;
269 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
/* EXT_RCVINFO variant (guard `if` dropped by extraction). */
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 cmh->cmsg_type = SCTP_EXTRCV;
273 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
/* Plain SNDRCV variant (the `else` and memcpy were dropped). */
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 cmh->cmsg_type = SCTP_SNDRCV;
279 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a TSN from the revokable mapping array to the non-revokable
 * (nr) mapping array once it has been delivered, so a later SACK
 * cannot renege on it.  Also maintains highest_tsn_inside_map /
 * highest_tsn_inside_nr_map.  No-op when sctp_do_drain is disabled
 * or the TSN is already at/below the cumulative ack.
 */
287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
289 uint32_t gap, i, cumackp1;
291 int in_r = 0, in_nr = 0;
293 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
296 cumackp1 = asoc->cumulative_tsn + 1;
297 if (SCTP_TSN_GT(cumackp1, tsn)) {
299 * this tsn is behind the cum ack and thus we don't need to
300 * worry about it being moved from one to the other.
304 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
305 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
306 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* TSN tracked in neither map is an invariant violation. */
307 if ((in_r == 0) && (in_nr == 0)) {
309 panic("Things are really messed up now");
311 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
312 sctp_print_mapping_array(asoc);
316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
318 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
319 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
320 asoc->highest_tsn_inside_nr_map = tsn;
322 if (tsn == asoc->highest_tsn_inside_map) {
323 /* We must back down to see what the new highest is */
324 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
325 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 asoc->highest_tsn_inside_map = i;
/* Nothing left in the revokable map below tsn. */
333 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert `control` into the proper per-stream queue (unordered or
 * ordered), keeping the ordered queue sorted by MID.  Presumably
 * returns 0 on success and non-zero on a duplicate MID (the return
 * statements were dropped by extraction) — callers abort the
 * association on the duplicate case.
 */
339 sctp_place_control_in_stream(struct sctp_stream_in *strm,
340 struct sctp_association *asoc,
341 struct sctp_queued_to_read *control)
343 struct sctp_queued_to_read *at;
344 struct sctp_readhead *q;
345 uint8_t flags, unordered;
/* Chunk flags live in the upper byte of sinfo_flags. */
347 flags = (control->sinfo_flags >> 8);
348 unordered = flags & SCTP_DATA_UNORDERED;
350 q = &strm->uno_inqueue;
351 if (asoc->idata_supported == 0) {
352 if (!TAILQ_EMPTY(q)) {
354 * Only one stream can be here in old style
359 TAILQ_INSERT_TAIL(q, control, next_instrm);
360 control->on_strm_q = SCTP_ON_UNORDERED;
/* Complete (unfragmented) message: all frag markers set at once. */
366 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
367 control->end_added = 1;
368 control->first_frag_seen = 1;
369 control->last_frag_seen = 1;
371 if (TAILQ_EMPTY(q)) {
373 TAILQ_INSERT_HEAD(q, control, next_instrm);
375 control->on_strm_q = SCTP_ON_UNORDERED;
377 control->on_strm_q = SCTP_ON_ORDERED;
/* Sorted insert by MID (serial-number arithmetic when !idata). */
381 TAILQ_FOREACH(at, q, next_instrm) {
382 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
384 * one in queue is bigger than the new one,
385 * insert before this one
387 TAILQ_INSERT_BEFORE(at, control, next_instrm);
389 control->on_strm_q = SCTP_ON_UNORDERED;
391 control->on_strm_q = SCTP_ON_ORDERED;
394 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
396 * Gak, He sent me a duplicate msg id
397 * number?? return -1 to abort.
401 if (TAILQ_NEXT(at, next_instrm) == NULL) {
403 * We are at the end, insert it
406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
407 sctp_log_strm_del(control, at,
408 SCTP_STR_LOG_FROM_INSERT_TL);
410 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
412 control->on_strm_q = SCTP_ON_UNORDERED;
414 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association due to a reassembly protocol violation:
 * format a diagnostic cause string (I-DATA vs. DATA format), free
 * the offending chunk, and send an ABORT with the cause.  `opspot`
 * identifies the call site (SCTP_LOC_* constant).
 * NOTE(review): the declaration of `oper` and several snprintf
 * argument lines were dropped by extraction.
 */
425 sctp_abort_in_reasm(struct sctp_tcb *stcb,
426 struct sctp_queued_to_read *control,
427 struct sctp_tmit_chunk *chk,
428 int *abort_flag, int opspot)
430 char msg[SCTP_DIAG_INFO_LEN];
433 if (stcb->asoc.idata_supported) {
434 snprintf(msg, sizeof(msg),
435 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
437 control->fsn_included,
440 chk->rec.data.fsn, chk->rec.data.mid);
442 snprintf(msg, sizeof(msg),
443 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
445 control->fsn_included,
449 (uint16_t)chk->rec.data.mid);
451 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
452 sctp_m_freem(chk->data);
454 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
455 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
456 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Dispose of a control entry that could not be queued: free every
 * chunk still on its reassembly list, then free the entry itself.
 */
461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
464 * The control could not be placed and must be cleaned.
466 struct sctp_tmit_chunk *chk, *nchk;
468 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
469 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
471 sctp_m_freem(chk->data);
473 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
475 sctp_free_a_readq(stcb, control);
271 * Queue the chunk either right into the socket buffer if it is the next one
272 * to go OR put it in the correct place in the delivery queue. If we do
273 * append to the so_buf, keep doing so until we are out of order as
274 * long as the control's entered are non-fragmented.
/*
 * Ordered delivery entry point: deliver `control` straight to the
 * socket read queue when its MID is the next expected one (and then
 * drain any now-in-order successors), otherwise insert it sorted into
 * the stream queue via sctp_place_control_in_stream().  Sets
 * *abort_flag on protocol violations (MID behind last delivered, or
 * duplicate MID).
 * NOTE(review): extraction dropped braces, `else` lines, returns and
 * some declarations (op_err, nxt_todel, `so`) — treat as a partial
 * view, not compilable text.
 */
485 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
486 struct sctp_association *asoc,
487 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
490 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
491 * all the data in one stream this could happen quite rapidly. One
492 * could use the TSN to keep track of things, but this scheme breaks
493 * down in the other type of stream usage that could occur. Send a
494 * single msg to stream 0, send 4Billion messages to stream 1, now
495 * send a message to stream 0. You have a situation where the TSN
496 * has wrapped but not in the stream. Is this worth worrying about
497 * or should we just change our queue sort at the bottom to be by
500 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
501 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
502 * assignment this could happen... and I don't see how this would be
503 * a violation. So for now I am undecided an will leave the sort by
504 * SSN alone. Maybe a hybred approach is the answer
507 struct sctp_queued_to_read *at;
511 struct sctp_stream_in *strm;
512 char msg[SCTP_DIAG_INFO_LEN];
514 strm = &asoc->strmin[control->sinfo_stream];
515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
516 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* MID behind the delivery point => peer violated the protocol. */
518 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
519 /* The incoming sseq is behind where we last delivered? */
520 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
521 strm->last_mid_delivered, control->mid);
523 * throw it in the stream so it gets cleaned up in
524 * association destruction
526 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
527 if (asoc->idata_supported) {
528 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
529 strm->last_mid_delivered, control->sinfo_tsn,
530 control->sinfo_stream, control->mid);
532 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
533 (uint16_t)strm->last_mid_delivered,
535 control->sinfo_stream,
536 (uint16_t)control->mid);
538 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
539 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
540 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
546 asoc->size_on_all_streams += control->length;
547 sctp_ucount_incr(asoc->cnt_on_all_streams);
548 nxt_todel = strm->last_mid_delivered + 1;
549 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
/* Apple/lock-testing builds need the socket lock before readq ops. */
319 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
553 so = SCTP_INP_SO(stcb->sctp_ep);
554 atomic_add_int(&stcb->asoc.refcnt, 1);
555 SCTP_TCB_UNLOCK(stcb);
556 SCTP_SOCKET_LOCK(so, 1);
558 atomic_subtract_int(&stcb->asoc.refcnt, 1);
559 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
560 SCTP_SOCKET_UNLOCK(so, 1);
564 /* can be delivered right away? */
565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
566 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
568 /* EY it wont be queued if it could be delivered directly */
/* Undo the size/count accounting added above before direct delivery. */
570 if (asoc->size_on_all_streams >= control->length) {
571 asoc->size_on_all_streams -= control->length;
574 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
576 asoc->size_on_all_streams = 0;
579 sctp_ucount_decr(asoc->cnt_on_all_streams);
580 strm->last_mid_delivered++;
581 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
582 sctp_add_to_readq(stcb->sctp_ep, stcb,
584 &stcb->sctp_socket->so_rcv, 1,
585 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain any queued entries that have now become in-order. */
586 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
588 nxt_todel = strm->last_mid_delivered + 1;
589 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
590 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
591 if (control->on_strm_q == SCTP_ON_ORDERED) {
592 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
593 if (asoc->size_on_all_streams >= control->length) {
594 asoc->size_on_all_streams -= control->length;
597 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
599 asoc->size_on_all_streams = 0;
602 sctp_ucount_decr(asoc->cnt_on_all_streams);
605 panic("Huh control: %p is on_strm_q: %d",
606 control, control->on_strm_q);
609 control->on_strm_q = 0;
610 strm->last_mid_delivered++;
612 * We ignore the return of deliver_data here
613 * since we always can hold the chunk on the
614 * d-queue. And we have a finite number that
615 * can be delivered from the strq.
617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
618 sctp_log_strm_del(control, NULL,
619 SCTP_STR_LOG_FROM_IMMED_DEL);
621 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
622 sctp_add_to_readq(stcb->sctp_ep, stcb,
624 &stcb->sctp_socket->so_rcv, 1,
625 SCTP_READ_LOCK_NOT_HELD,
628 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
368 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
634 SCTP_SOCKET_UNLOCK(so, 1);
639 * Ok, we did not deliver this guy, find the correct place
640 * to put it on the queue.
642 if (sctp_place_control_in_stream(strm, asoc, control)) {
643 snprintf(msg, sizeof(msg),
644 "Queue to str MID: %u duplicate",
646 sctp_clean_up_control(stcb, control);
647 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
648 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
649 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk control->data, dropping zero-length mbufs, recomputing
 * control->length and setting control->tail_mbuf to the last mbuf.
 * When the control is already on the read queue, also charges each
 * mbuf to the socket receive buffer (caller holds the SB lock).
 * NOTE(review): loop header and several brace/else lines were
 * dropped by extraction.
 */
657 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
659 struct mbuf *m, *prev = NULL;
660 struct sctp_tcb *stcb;
662 stcb = control->stcb;
663 control->held_length = 0;
667 if (SCTP_BUF_LEN(m) == 0) {
668 /* Skip mbufs with NO length */
/* Empty mbuf at the head: free it and re-anchor control->data. */
671 control->data = sctp_m_free(m);
674 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
675 m = SCTP_BUF_NEXT(prev);
678 control->tail_mbuf = prev;
683 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
684 if (control->on_read_q) {
686 * On read queue so we must increment the SB stuff,
687 * we assume caller has done any locks of SB.
689 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
691 m = SCTP_BUF_NEXT(m);
694 control->tail_mbuf = prev;
/*
 * Append mbuf chain `m` to control->data at the tail pointer,
 * skipping empty mbufs, accumulating the added byte count into
 * *added and control->length, and charging the socket buffer when
 * the control is on the read queue.  Falls back to
 * sctp_setup_tail_pointer() when no tail exists yet.
 * NOTE(review): extraction dropped the stcb NULL check around the
 * panic, loop headers and several braces.
 */
699 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
701 struct mbuf *prev = NULL;
702 struct sctp_tcb *stcb;
704 stcb = control->stcb;
707 panic("Control broken");
712 if (control->tail_mbuf == NULL) {
715 sctp_setup_tail_pointer(control);
718 control->tail_mbuf->m_next = m;
720 if (SCTP_BUF_LEN(m) == 0) {
721 /* Skip mbufs with NO length */
/* Empty mbuf right after the old tail: splice it out. */
724 control->tail_mbuf->m_next = sctp_m_free(m);
725 m = control->tail_mbuf->m_next;
727 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
728 m = SCTP_BUF_NEXT(prev);
731 control->tail_mbuf = prev;
736 if (control->on_read_q) {
738 * On read queue so we must increment the SB stuff,
739 * we assume caller has done any locks of SB.
741 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
743 *added += SCTP_BUF_LEN(m);
744 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
745 m = SCTP_BUF_NEXT(m);
748 control->tail_mbuf = prev;
/*
 * Initialize a fresh read-queue entry `nc` by copying the delivery
 * metadata of `control` (stream, MID, flags, TSNs, source net/port),
 * with an empty reassembly list and no FSN included yet.  Takes a
 * reference on the shared whoFrom net.
 * NOTE(review): nc->mid is assigned twice below — harmless duplicate
 * that also exists upstream; left untouched since this view of the
 * function is incomplete.
 */
753 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
755 memset(nc, 0, sizeof(struct sctp_queued_to_read));
756 nc->sinfo_stream = control->sinfo_stream;
757 nc->mid = control->mid;
758 TAILQ_INIT(&nc->reasm);
759 nc->top_fsn = control->top_fsn;
760 nc->mid = control->mid;
761 nc->sinfo_flags = control->sinfo_flags;
762 nc->sinfo_ppid = control->sinfo_ppid;
763 nc->sinfo_context = control->sinfo_context;
/* 0xffffffff == "no fragment included yet" sentinel. */
764 nc->fsn_included = 0xffffffff;
765 nc->sinfo_tsn = control->sinfo_tsn;
766 nc->sinfo_cumtsn = control->sinfo_cumtsn;
767 nc->sinfo_assoc_id = control->sinfo_assoc_id;
768 nc->whoFrom = control->whoFrom;
769 atomic_add_int(&nc->whoFrom->ref_count, 1);
770 nc->stcb = control->stcb;
771 nc->port_from = control->port_from;
/*
 * Reset a control entry for reuse: record `tsn` as the included FSN
 * and, if the entry was already handed to the inp read queue, pull
 * it back off and clear the on_read_q flag.
 */
775 sctp_reset_a_control(struct sctp_queued_to_read *control,
776 struct sctp_inpcb *inp, uint32_t tsn)
778 control->fsn_included = tsn;
779 if (control->on_read_q) {
781 * We have to purge it from there, hopefully this will work
784 TAILQ_REMOVE(&inp->read_queue, control, next);
785 control->on_read_q = 0;
/*
 * Old-style (non-I-DATA) unordered reassembly: all unordered chunks
 * share MID 0, so collapse consecutive FSNs onto `control`, and when
 * the message completes, migrate any leftover fragments to a new
 * control entry `nc` so the next message can continue reassembling.
 * Returns non-zero (presumably) when the caller should stop scanning
 * the unordered queue — return statements were dropped by extraction.
 * NOTE(review): this is the most heavily truncated function in the
 * chunk (missing loop restarts, braces, and the pd_point parameter
 * declaration); no code is altered here.
 */
790 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
791 struct sctp_association *asoc,
792 struct sctp_stream_in *strm,
793 struct sctp_queued_to_read *control,
795 int inp_read_lock_held)
798 * Special handling for the old un-ordered data chunk. All the
799 * chunks/TSN's go to mid 0. So we have to do the old style watching
800 * to see if we have it all. If you return one, no other control
801 * entries on the un-ordered queue will be looked at. In theory
802 * there should be no others entries in reality, unless the guy is
803 * sending both unordered NDATA and unordered DATA...
805 struct sctp_tmit_chunk *chk, *lchk, *tchk;
807 struct sctp_queued_to_read *nc;
810 if (control->first_frag_seen == 0) {
811 /* Nothing we can do, we have not seen the first piece yet */
814 /* Collapse any we can */
817 fsn = control->fsn_included + 1;
818 /* Now what can we add? */
819 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
820 if (chk->rec.data.fsn == fsn) {
/* Next expected fragment: fold it into control. */
822 sctp_alloc_a_readq(stcb, nc);
826 memset(nc, 0, sizeof(struct sctp_queued_to_read));
827 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
828 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
832 if (control->end_added) {
834 if (!TAILQ_EMPTY(&control->reasm)) {
836 * Ok we have to move anything left
837 * on the control queue to a new
840 sctp_build_readq_entry_from_ctl(nc, control);
841 tchk = TAILQ_FIRST(&control->reasm);
842 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
843 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
844 if (asoc->size_on_reasm_queue >= tchk->send_size) {
845 asoc->size_on_reasm_queue -= tchk->send_size;
848 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
850 asoc->size_on_reasm_queue = 0;
853 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
/* Seed nc with the FIRST fragment of the next message. */
854 nc->first_frag_seen = 1;
855 nc->fsn_included = tchk->rec.data.fsn;
856 nc->data = tchk->data;
857 nc->sinfo_ppid = tchk->rec.data.ppid;
858 nc->sinfo_tsn = tchk->rec.data.tsn;
859 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
861 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
862 sctp_setup_tail_pointer(nc);
863 tchk = TAILQ_FIRST(&control->reasm);
865 /* Spin the rest onto the queue */
867 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
868 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
869 tchk = TAILQ_FIRST(&control->reasm);
872 * Now lets add it to the queue
873 * after removing control
875 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
876 nc->on_strm_q = SCTP_ON_UNORDERED;
877 if (control->on_strm_q) {
878 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
879 control->on_strm_q = 0;
882 if (control->pdapi_started) {
883 strm->pd_api_started = 0;
884 control->pdapi_started = 0;
886 if (control->on_strm_q) {
887 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
888 control->on_strm_q = 0;
889 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
891 if (control->on_read_q == 0) {
892 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
893 &stcb->sctp_socket->so_rcv, control->end_added,
894 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
896 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
897 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
899 * Switch to the new guy and
/* nc unused: release it back to the zone. */
905 if (nc->on_strm_q == 0) {
906 sctp_free_a_readq(stcb, nc);
911 sctp_free_a_readq(stcb, nc);
/* Partial-delivery: push a long incomplete message to the reader. */
918 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
919 strm->pd_api_started = 1;
920 control->pdapi_started = 1;
921 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
922 &stcb->sctp_socket->so_rcv, control->end_added,
923 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
924 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Place chunk `chk` into old-style unordered reassembly for
 * `control`, sorted by FSN (which equals the TSN for old DATA).
 * Handles the tricky multi-FIRST case by swapping data/FSN/TSN/PPID
 * with the control when a smaller FIRST arrives, and aborts the
 * association on impossible duplicates.
 * NOTE(review): declarations of `tdata`, `tmp` and `inserted`, plus
 * braces/returns, were dropped by extraction.
 */
932 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
933 struct sctp_association *asoc,
934 struct sctp_queued_to_read *control,
935 struct sctp_tmit_chunk *chk,
938 struct sctp_tmit_chunk *at;
942 * Here we need to place the chunk into the control structure sorted
943 * in the correct order.
945 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
946 /* Its the very first one. */
947 SCTPDBG(SCTP_DEBUG_XXX,
948 "chunk is a first fsn: %u becomes fsn_included\n",
950 if (control->first_frag_seen) {
952 * In old un-ordered we can reassembly on one
953 * control multiple messages. As long as the next
954 * FIRST is greater then the old first (TSN i.e. FSN
960 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
962 * Easy way the start of a new guy beyond
967 if ((chk->rec.data.fsn == control->fsn_included) ||
968 (control->pdapi_started)) {
970 * Ok this should not happen, if it does we
971 * started the pd-api on the higher TSN
972 * (since the equals part is a TSN failure
975 * We are completly hosed in that case since
976 * I have no way to recover. This really
977 * will only happen if we can get more TSN's
978 * higher before the pd-api-point.
980 sctp_abort_in_reasm(stcb, control, chk,
982 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
987 * Ok we have two firsts and the one we just got is
988 * smaller than the one we previously placed.. yuck!
989 * We must swap them out.
/* Swap the chunk's payload/metadata with the control's. */
992 tdata = control->data;
993 control->data = chk->data;
995 /* Save the lengths */
996 chk->send_size = control->length;
997 /* Recompute length of control and tail pointer */
998 sctp_setup_tail_pointer(control);
999 /* Fix the FSN included */
1000 tmp = control->fsn_included;
1001 control->fsn_included = chk->rec.data.fsn;
1002 chk->rec.data.fsn = tmp;
1003 /* Fix the TSN included */
1004 tmp = control->sinfo_tsn;
1005 control->sinfo_tsn = chk->rec.data.tsn;
1006 chk->rec.data.tsn = tmp;
1007 /* Fix the PPID included */
1008 tmp = control->sinfo_ppid;
1009 control->sinfo_ppid = chk->rec.data.ppid;
1010 chk->rec.data.ppid = tmp;
1011 /* Fix tail pointer */
/* No FIRST seen before: this chunk seeds the control directly. */
1014 control->first_frag_seen = 1;
1015 control->fsn_included = chk->rec.data.fsn;
1016 control->top_fsn = chk->rec.data.fsn;
1017 control->sinfo_tsn = chk->rec.data.tsn;
1018 control->sinfo_ppid = chk->rec.data.ppid;
1019 control->data = chk->data;
1020 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1022 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1023 sctp_setup_tail_pointer(control);
/* Non-FIRST fragment: sorted insert into the reasm list by FSN. */
1028 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1029 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1031 * This one in queue is bigger than the new one,
1032 * insert the new one before at.
1034 asoc->size_on_reasm_queue += chk->send_size;
1035 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1037 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1039 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1041 * They sent a duplicate fsn number. This really
1042 * should not happen since the FSN is a TSN and it
1043 * should have been dropped earlier.
1045 sctp_abort_in_reasm(stcb, control, chk,
1047 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1052 if (inserted == 0) {
1053 /* Its at the end */
1054 asoc->size_on_reasm_queue += chk->send_size;
1055 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1056 control->top_fsn = chk->rec.data.fsn;
1057 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * Scan a stream's unordered and ordered queues for reassembled (or
 * partial-delivery-eligible) messages and move them to the socket
 * read queue.  pd_point is the partial-delivery threshold derived
 * from the socket buffer limit and the endpoint setting.
 * NOTE(review): this function continues past the end of the visible
 * chunk (the ordered-delivery loop is cut off mid-branch); nothing
 * below is a complete view, so only comments were added.
 */
1062 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1063 struct sctp_stream_in *strm, int inp_read_lock_held)
1066 * Given a stream, strm, see if any of the SSN's on it that are
1067 * fragmented are ready to deliver. If so go ahead and place them on
1068 * the read queue. In so placing if we have hit the end, then we
1069 * need to remove them from the stream's queue.
1071 struct sctp_queued_to_read *control, *nctl = NULL;
1072 uint32_t next_to_del;
1076 if (stcb->sctp_socket) {
1077 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1078 stcb->sctp_ep->partial_delivery_point);
1080 pd_point = stcb->sctp_ep->partial_delivery_point;
1082 control = TAILQ_FIRST(&strm->uno_inqueue);
1084 if ((control != NULL) &&
1085 (asoc->idata_supported == 0)) {
1086 /* Special handling needed for "old" data format */
1087 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1091 if (strm->pd_api_started) {
1092 /* Can't add more */
/* I-DATA unordered path: walk the unordered queue. */
1096 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1097 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1098 nctl = TAILQ_NEXT(control, next_instrm);
1099 if (control->end_added) {
1100 /* We just put the last bit on */
1101 if (control->on_strm_q) {
1103 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1104 panic("Huh control: %p on_q: %d -- not unordered?",
1105 control, control->on_strm_q);
1108 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1109 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1110 control->on_strm_q = 0;
1112 if (control->on_read_q == 0) {
1113 sctp_add_to_readq(stcb->sctp_ep, stcb,
1115 &stcb->sctp_socket->so_rcv, control->end_added,
1116 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1119 /* Can we do a PD-API for this un-ordered guy? */
1120 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1121 strm->pd_api_started = 1;
1122 control->pdapi_started = 1;
1123 sctp_add_to_readq(stcb->sctp_ep, stcb,
1125 &stcb->sctp_socket->so_rcv, control->end_added,
1126 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
/* Now the ordered queue. */
1134 control = TAILQ_FIRST(&strm->inqueue);
1135 if (strm->pd_api_started) {
1136 /* Can't add more */
1139 if (control == NULL) {
1142 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1144 * Ok the guy at the top was being partially delivered
1145 * completed, so we remove it. Note the pd_api flag was
1146 * taken off when the chunk was merged on in
1147 * sctp_queue_data_for_reasm below.
1149 nctl = TAILQ_NEXT(control, next_instrm);
1150 SCTPDBG(SCTP_DEBUG_XXX,
1151 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1152 control, control->end_added, control->mid,
1153 control->top_fsn, control->fsn_included,
1154 strm->last_mid_delivered);
1155 if (control->end_added) {
1156 if (control->on_strm_q) {
1158 if (control->on_strm_q != SCTP_ON_ORDERED) {
1159 panic("Huh control: %p on_q: %d -- not ordered?",
1160 control, control->on_strm_q);
1163 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1164 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1165 if (asoc->size_on_all_streams >= control->length) {
1166 asoc->size_on_all_streams -= control->length;
1169 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1171 asoc->size_on_all_streams = 0;
1174 sctp_ucount_decr(asoc->cnt_on_all_streams);
1175 control->on_strm_q = 0;
1177 if (strm->pd_api_started && control->pdapi_started) {
1178 control->pdapi_started = 0;
1179 strm->pd_api_started = 0;
1181 if (control->on_read_q == 0) {
1182 sctp_add_to_readq(stcb->sctp_ep, stcb,
1184 &stcb->sctp_socket->so_rcv, control->end_added,
1185 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1190 if (strm->pd_api_started) {
1192 * Can't add more must have gotten an un-ordered above being
1193 * partially delivered.
/* Deliver in-order messages starting at last_mid_delivered + 1. */
1198 next_to_del = strm->last_mid_delivered + 1;
1200 SCTPDBG(SCTP_DEBUG_XXX,
1201 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1202 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1204 nctl = TAILQ_NEXT(control, next_instrm);
1205 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1206 (control->first_frag_seen)) {
1209 /* Ok we can deliver it onto the stream. */
1210 if (control->end_added) {
1211 /* We are done with it afterwards */
1212 if (control->on_strm_q) {
1214 if (control->on_strm_q != SCTP_ON_ORDERED) {
1215 panic("Huh control: %p on_q: %d -- not ordered?",
1216 control, control->on_strm_q);
1219 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1220 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1221 if (asoc->size_on_all_streams >= control->length) {
1222 asoc->size_on_all_streams -= control->length;
1225 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1227 asoc->size_on_all_streams = 0;
1230 sctp_ucount_decr(asoc->cnt_on_all_streams);
1231 control->on_strm_q = 0;
1235 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1237 * A singleton now slipping through - mark
1238 * it non-revokable too
1240 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1241 } else if (control->end_added == 0) {
1243 * Check if we can defer adding until its
1246 if ((control->length < pd_point) || (strm->pd_api_started)) {
1248 * Don't need it or cannot add more
1249 * (one being delivered that way)
1254 done = (control->end_added) && (control->last_frag_seen);
1255 if (control->on_read_q == 0) {
1257 if (asoc->size_on_all_streams >= control->length) {
1258 asoc->size_on_all_streams -= control->length;
1261 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1263 asoc->size_on_all_streams = 0;
1266 strm->pd_api_started = 1;
1267 control->pdapi_started = 1;
1269 sctp_add_to_readq(stcb->sctp_ep, stcb,
1271 &stcb->sctp_socket->so_rcv, control->end_added,
1272 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1274 strm->last_mid_delivered = next_to_del;
/*
 * Merge the data mbufs of chunk `chk` into the partially reassembled
 * message `control`, update reassembly accounting, and free the chunk.
 * If the control sits on the socket read queue and the caller does not
 * already hold the INP read lock (hold_rlock == 0), the lock is taken
 * here because the application may be pd-api reading concurrently.
 * NOTE(review): some lines of this routine are not visible in this
 * view; comments below describe only the code that is shown.
 */
1287 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1288 struct sctp_stream_in *strm,
1289 struct sctp_tcb *stcb, struct sctp_association *asoc,
1290 struct sctp_tmit_chunk *chk, int hold_rlock)
1293 * Given a control and a chunk, merge the data from the chk onto the
1294 * control and free up the chunk resources.
1299 if (control->on_read_q && (hold_rlock == 0)) {
1301 * Its being pd-api'd so we must do some locks.
1303 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First data for this message: adopt the chunk's mbuf chain; otherwise append. */
1306 if (control->data == NULL) {
1307 control->data = chk->data;
1308 sctp_setup_tail_pointer(control);
1310 sctp_add_to_tail_pointer(control, chk->data, &added);
/* The fragment is now part of the message: record its FSN and take it
 * out of the reassembly-queue accounting. */
1312 control->fsn_included = chk->rec.data.fsn;
1313 asoc->size_on_reasm_queue -= chk->send_size;
1314 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1315 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1317 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* First fragment carries the message's TSN/PPID for the receiver. */
1318 control->first_frag_seen = 1;
1319 control->sinfo_tsn = chk->rec.data.tsn;
1320 control->sinfo_ppid = chk->rec.data.ppid;
1322 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* Last fragment: if the control is queued on both a stream queue and
 * the read queue, finish any partial-delivery state and unlink it from
 * the (un)ordered stream queue. */
1324 if ((control->on_strm_q) && (control->on_read_q)) {
1325 if (control->pdapi_started) {
1326 control->pdapi_started = 0;
1327 strm->pd_api_started = 0;
1329 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1331 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1332 control->on_strm_q = 0;
1333 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1335 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1337 * Don't need to decrement
1338 * size_on_all_streams, since control is on
1341 sctp_ucount_decr(asoc->cnt_on_all_streams);
1342 control->on_strm_q = 0;
/* Any other non-zero on_strm_q value is an invariant violation. */
1344 } else if (control->on_strm_q) {
1345 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1346 control->on_strm_q);
1350 control->end_added = 1;
1351 control->last_frag_seen = 1;
/* NOTE(review): the unlock is presumably guarded by the same
 * on_read_q/hold_rlock condition as the lock above — guard elided here. */
1354 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1356 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1361 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1362 * queue, see if anything can be delivered. If so pull it off (or as much as
1363 * we can). If we run out of space then we must dump what we can and set the
1364 * appropriate flag to say we queued what we could.
/*
 * Insert chunk `chk` into the reassembly state for message `control`
 * (created_control != 0 means `control` was just allocated for this
 * chunk and must first be placed on the stream-in queue).  Validates
 * fragment ordering (B/E bits, FSN vs. fsn_included/top_fsn), aborting
 * the association via sctp_abort_in_reasm() on protocol violations,
 * then pulls any now-in-sequence fragments from the reasm list into the
 * control and wakes the reader.
 * NOTE(review): some lines (closing braces, else-arms, SCTPDBG args)
 * are not visible in this view; comments describe the visible code.
 */
1367 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1368 struct sctp_queued_to_read *control,
1369 struct sctp_tmit_chunk *chk,
1370 int created_control,
1371 int *abort_flag, uint32_t tsn)
1374 struct sctp_tmit_chunk *at, *nat;
1375 struct sctp_stream_in *strm;
1376 int do_wakeup, unordered;
1379 strm = &asoc->strmin[control->sinfo_stream];
1381 * For old un-ordered data chunks.
1383 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1388 /* Must be added to the stream-in queue */
1389 if (created_control) {
/* Ordered messages are counted against the per-association stream total. */
1390 if (unordered == 0) {
1391 sctp_ucount_incr(asoc->cnt_on_all_streams);
1393 if (sctp_place_control_in_stream(strm, asoc, control)) {
1394 /* Duplicate SSN? */
1395 sctp_abort_in_reasm(stcb, control, chk,
1397 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1398 sctp_clean_up_control(stcb, control);
1401 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1403 * Ok we created this control and now lets validate
1404 * that its legal i.e. there is a B bit set, if not
1405 * and we have up to the cum-ack then its invalid.
1407 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1408 sctp_abort_in_reasm(stcb, control, chk,
1410 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old (non-I-DATA) unordered fragments have no FSN ordering; they take
 * a dedicated injection path. */
1415 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1416 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1420 * Ok we must queue the chunk into the reassembly portion: o if its
1421 * the first it goes to the control mbuf. o if its not first but the
1422 * next in sequence it goes to the control, and each succeeding one
1423 * in order also goes. o if its not in order we place it on the list
1426 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1427 /* Its the very first one. */
1428 SCTPDBG(SCTP_DEBUG_XXX,
1429 "chunk is a first fsn: %u becomes fsn_included\n",
1431 if (control->first_frag_seen) {
1433 * Error on senders part, they either sent us two
1434 * data chunks with FIRST, or they sent two
1435 * un-ordered chunks that were fragmented at the
1436 * same time in the same stream.
1438 sctp_abort_in_reasm(stcb, control, chk,
1440 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* Adopt the first fragment directly into the control. */
1443 control->first_frag_seen = 1;
1444 control->sinfo_ppid = chk->rec.data.ppid;
1445 control->sinfo_tsn = chk->rec.data.tsn;
1446 control->fsn_included = chk->rec.data.fsn;
1447 control->data = chk->data;
1448 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1450 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1451 sctp_setup_tail_pointer(control);
1452 asoc->size_on_all_streams += control->length;
1454 /* Place the chunk in our list */
1457 if (control->last_frag_seen == 0) {
1458 /* Still willing to raise highest FSN seen */
1459 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1460 SCTPDBG(SCTP_DEBUG_XXX,
1461 "We have a new top_fsn: %u\n",
1463 control->top_fsn = chk->rec.data.fsn;
1465 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1466 SCTPDBG(SCTP_DEBUG_XXX,
1467 "The last fsn is now in place fsn: %u\n",
1469 control->last_frag_seen = 1;
1471 if (asoc->idata_supported || control->first_frag_seen) {
1473 * For IDATA we always check since we know
1474 * that the first fragment is 0. For old
1475 * DATA we have to receive the first before
1476 * we know the first FSN (which is the TSN).
1478 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1480 * We have already delivered up to
1483 sctp_abort_in_reasm(stcb, control, chk,
1485 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/* Last fragment already recorded: a second LAST is a protocol error. */
1490 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1491 /* Second last? huh? */
1492 SCTPDBG(SCTP_DEBUG_XXX,
1493 "Duplicate last fsn: %u (top: %u) -- abort\n",
1494 chk->rec.data.fsn, control->top_fsn);
1495 sctp_abort_in_reasm(stcb, control,
1497 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1500 if (asoc->idata_supported || control->first_frag_seen) {
1502 * For IDATA we always check since we know
1503 * that the first fragment is 0. For old
1504 * DATA we have to receive the first before
1505 * we know the first FSN (which is the TSN).
1508 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1510 * We have already delivered up to
1513 SCTPDBG(SCTP_DEBUG_XXX,
1514 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1515 chk->rec.data.fsn, control->fsn_included);
1516 sctp_abort_in_reasm(stcb, control, chk,
1518 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1523 * validate not beyond top FSN if we have seen last
1526 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1527 SCTPDBG(SCTP_DEBUG_XXX,
1528 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1531 sctp_abort_in_reasm(stcb, control, chk,
1533 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1538 * If we reach here, we need to place the new chunk in the
1539 * reassembly for this control.
1541 SCTPDBG(SCTP_DEBUG_XXX,
1542 "chunk is a not first fsn: %u needs to be inserted\n",
/* Keep the reasm list sorted by FSN; duplicates abort the association. */
1544 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1545 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1547 * This one in queue is bigger than the new
1548 * one, insert the new one before at.
1550 SCTPDBG(SCTP_DEBUG_XXX,
1551 "Insert it before fsn: %u\n",
1553 asoc->size_on_reasm_queue += chk->send_size;
1554 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1555 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1558 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1560 * Gak, He sent me a duplicate str seq
1564 * foo bar, I guess I will just free this
1565 * new guy, should we abort too? FIX ME
1566 * MAYBE? Or it COULD be that the SSN's have
1567 * wrapped. Maybe I should compare to TSN
1568 * somehow... sigh for now just blow away
1571 SCTPDBG(SCTP_DEBUG_XXX,
1572 "Duplicate to fsn: %u -- abort\n",
1574 sctp_abort_in_reasm(stcb, control,
1576 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1580 if (inserted == 0) {
1581 /* Goes on the end */
1582 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1584 asoc->size_on_reasm_queue += chk->send_size;
1585 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1586 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1590 * Ok lets see if we can suck any up into the control structure that
1591 * are in seq if it makes sense.
1595 * If the first fragment has not been seen there is no sense in
1598 if (control->first_frag_seen) {
1599 next_fsn = control->fsn_included + 1;
/* Drain every fragment that is now next-in-sequence into the control. */
1600 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1601 if (at->rec.data.fsn == next_fsn) {
1602 /* We can add this one now to the control */
1603 SCTPDBG(SCTP_DEBUG_XXX,
1604 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1607 next_fsn, control->fsn_included);
1608 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1609 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1610 if (control->on_read_q) {
1614 * We only add to the
1615 * size-on-all-streams if its not on
1616 * the read q. The read q flag will
1617 * cause a sballoc so its accounted
1620 asoc->size_on_all_streams += lenadded;
/* Message now complete: end any in-flight partial delivery and, if the
 * control is not already readable, hand it to the read queue. */
1623 if (control->end_added && control->pdapi_started) {
1624 if (strm->pd_api_started) {
1625 strm->pd_api_started = 0;
1626 control->pdapi_started = 0;
1628 if (control->on_read_q == 0) {
1629 sctp_add_to_readq(stcb->sctp_ep, stcb,
1631 &stcb->sctp_socket->so_rcv, control->end_added,
1632 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1642 /* Need to wakeup the reader */
1643 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Find an existing reassembly control for message id `mid` on the given
 * stream.  The ordered in-queue is searched by MID; for I-DATA the
 * unordered queue can also be matched by MID, while for old DATA the
 * head of the unordered queue is used (old unordered chunks carry no
 * usable MID).  NOTE(review): the return statements and the guard that
 * presumably uses `ordered` are elided from this view — verify against
 * the full source.
 */
1647 static struct sctp_queued_to_read *
1648 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1650 struct sctp_queued_to_read *control;
/* Search the ordered in-queue for a control with a matching MID. */
1653 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1654 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1659 if (idata_supported) {
/* I-DATA: unordered messages also carry a MID, so match it here. */
1660 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1661 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Old DATA: take the head of the unordered queue. */
1666 control = TAILQ_FIRST(&strm->uno_inqueue);
/*
 * Process one incoming DATA or I-DATA chunk (`chk_type` selects the
 * wire format).  Parses the header, rejects duplicates/empty chunks/
 * invalid streams, enforces rwnd and queue limits, then either
 * express-delivers a complete in-order message straight to the socket
 * read queue or hands the chunk to the ordered-delivery / reassembly
 * machinery.  Sets *abort_flag when the association is torn down and
 * *break_flag when the caller should stop processing the packet.
 * NOTE(review): many lines (closing braces, else-arms, returns, some
 * declarations) are elided from this view; comments below describe
 * only the visible code.
 */
1673 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1674 struct mbuf **m, int offset, int chk_length,
1675 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1676 int *break_flag, int last_chunk, uint8_t chk_type)
1678 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1679 uint32_t tsn, fsn, gap, mid;
1682 int need_reasm_check = 0;
1684 struct mbuf *op_err;
1685 char msg[SCTP_DIAG_INFO_LEN];
1686 struct sctp_queued_to_read *control, *ncontrol;
1689 struct sctp_stream_reset_list *liste;
1692 int created_control = 0;
/* --- Parse the chunk header (I-DATA vs. old DATA layouts differ). --- */
1694 if (chk_type == SCTP_IDATA) {
1695 struct sctp_idata_chunk *chunk, chunk_buf;
1697 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1698 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1699 chk_flags = chunk->ch.chunk_flags;
1700 clen = sizeof(struct sctp_idata_chunk);
1701 tsn = ntohl(chunk->dp.tsn);
1702 sid = ntohs(chunk->dp.sid);
1703 mid = ntohl(chunk->dp.mid);
/* In I-DATA the same field holds the PPID on a first fragment and the
 * FSN on middle/last fragments. */
1704 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1706 ppid = chunk->dp.ppid_fsn.ppid;
1708 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1709 ppid = 0xffffffff; /* Use as an invalid value. */
1712 struct sctp_data_chunk *chunk, chunk_buf;
1714 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1715 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1716 chk_flags = chunk->ch.chunk_flags;
1717 clen = sizeof(struct sctp_data_chunk);
1718 tsn = ntohl(chunk->dp.tsn);
1719 sid = ntohs(chunk->dp.sid);
/* Old DATA: the 16-bit SSN doubles as the message id. */
1720 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1722 ppid = chunk->dp.ppid;
/* A data chunk with no user payload is a protocol error -> ABORT. */
1724 if ((size_t)chk_length == clen) {
1726 * Need to send an abort since we had a empty data chunk.
1728 op_err = sctp_generate_no_user_data_cause(tsn);
1729 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1730 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1734 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1735 asoc->send_sack = 1;
1737 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1738 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1739 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1744 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
/* --- Duplicate detection: at or below the cumulative ack. --- */
1745 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1746 /* It is a duplicate */
1747 SCTP_STAT_INCR(sctps_recvdupdata);
1748 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1749 /* Record a dup for the next outbound sack */
1750 asoc->dup_tsns[asoc->numduptsns] = tsn;
1753 asoc->send_sack = 1;
1756 /* Calculate the number of TSN's between the base and this TSN */
1757 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1758 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1759 /* Can't hold the bit in the mapping at max array, toss it */
1762 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1763 SCTP_TCB_LOCK_ASSERT(stcb);
1764 if (sctp_expand_mapping_array(asoc, gap)) {
1765 /* Can't expand, drop it */
1769 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1772 /* See if we have received this one already */
1773 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1774 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1775 SCTP_STAT_INCR(sctps_recvdupdata);
1776 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1777 /* Record a dup for the next outbound sack */
1778 asoc->dup_tsns[asoc->numduptsns] = tsn;
1781 asoc->send_sack = 1;
1785 * Check to see about the GONE flag, duplicates would cause a sack
1786 * to be sent up above
1788 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1789 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1790 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1792 * wait a minute, this guy is gone, there is no longer a
1793 * receiver. Send peer an ABORT!
1795 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1796 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1801 * Now before going further we see if there is room. If NOT then we
1802 * MAY let one through only IF this TSN is the one we are waiting
1803 * for on a partial delivery API.
1806 /* Is the stream valid? */
1807 if (sid >= asoc->streamincnt) {
1808 struct sctp_error_invalid_stream *cause;
1810 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1811 0, M_NOWAIT, 1, MT_DATA);
1812 if (op_err != NULL) {
1813 /* add some space up front so prepend will work well */
1814 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1815 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1817 * Error causes are just param's and this one has
1818 * two back to back phdr, one with the error type
1819 * and size, the other with the streamid and a rsvd
1821 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1822 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1823 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1824 cause->stream_id = htons(sid);
1825 cause->reserved = htons(0);
1826 sctp_queue_op_err(stcb, op_err);
1828 SCTP_STAT_INCR(sctps_badsid);
1829 SCTP_TCB_LOCK_ASSERT(stcb);
/* Invalid stream: still mark the TSN received (non-renegable) so the
 * peer does not retransmit it forever. */
1830 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1831 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1832 asoc->highest_tsn_inside_nr_map = tsn;
1834 if (tsn == (asoc->cumulative_tsn + 1)) {
1835 /* Update cum-ack */
1836 asoc->cumulative_tsn = tsn;
1841 * If its a fragmented message, lets see if we can find the control
1842 * on the reassembly queues.
1844 if ((chk_type == SCTP_IDATA) &&
1845 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1848 * The first *must* be fsn 0, and other (middle/end) pieces
1849 * can *not* be fsn 0. XXX: This can happen in case of a
1850 * wrap around. Ignore is for now.
1852 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1856 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1857 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1858 chk_flags, control);
/* --- Fragment sanity checks against any existing reassembly. --- */
1859 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1860 /* See if we can find the re-assembly entity */
1861 if (control != NULL) {
1862 /* We found something, does it belong? */
1863 if (ordered && (mid != control->mid)) {
1864 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1866 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1867 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1868 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1872 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1874 * We can't have a switched order with an
1877 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1881 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1883 * We can't have a switched unordered with a
1886 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1893 * Its a complete segment. Lets validate we don't have a
1894 * re-assembly going on with the same Stream/Seq (for
1895 * ordered) or in the same Stream for unordered.
1897 if (control != NULL) {
1898 if (ordered || asoc->idata_supported) {
1899 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1901 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1904 if ((tsn == control->fsn_included + 1) &&
1905 (control->end_added == 0)) {
1906 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
/* --- Resource limits: queued-chunk cap and receive window. --- */
1914 /* now do the tests */
1915 if (((asoc->cnt_on_all_streams +
1916 asoc->cnt_on_reasm_queue +
1917 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1918 (((int)asoc->my_rwnd) <= 0)) {
1920 * When we have NO room in the rwnd we check to make sure
1921 * the reader is doing its job...
1923 if (stcb->sctp_socket->so_rcv.sb_cc) {
1924 /* some to read, wake-up */
1925 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple/lock-testing builds need the socket lock; drop/retake the TCB
 * lock around it and guard against the assoc disappearing meanwhile. */
1928 so = SCTP_INP_SO(stcb->sctp_ep);
1929 atomic_add_int(&stcb->asoc.refcnt, 1);
1930 SCTP_TCB_UNLOCK(stcb);
1931 SCTP_SOCKET_LOCK(so, 1);
1932 SCTP_TCB_LOCK(stcb);
1933 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1934 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1935 /* assoc was freed while we were unlocked */
1936 SCTP_SOCKET_UNLOCK(so, 1);
1940 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1941 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1942 SCTP_SOCKET_UNLOCK(so, 1);
1945 /* now is it in the mapping array of what we have accepted? */
1946 if (chk_type == SCTP_DATA) {
1947 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1948 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1949 /* Nope not in the valid range dump it */
1951 sctp_set_rwnd(stcb, asoc);
1952 if ((asoc->cnt_on_all_streams +
1953 asoc->cnt_on_reasm_queue +
1954 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1955 SCTP_STAT_INCR(sctps_datadropchklmt);
1957 SCTP_STAT_INCR(sctps_datadroprwnd);
1963 if (control == NULL) {
1966 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
/* Optional per-association TSN trace ring (debug build only). */
1971 #ifdef SCTP_ASOCLOG_OF_TSNS
1972 SCTP_TCB_LOCK_ASSERT(stcb);
1973 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1974 asoc->tsn_in_at = 0;
1975 asoc->tsn_in_wrapped = 1;
1977 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1978 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1979 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1980 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1981 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1982 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1983 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1984 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1988 * Before we continue lets validate that we are not being fooled by
1989 * an evil attacker. We can only have Nk chunks based on our TSN
1990 * spread allowed by the mapping array N * 8 bits, so there is no
1991 * way our stream sequence numbers could have wrapped. We of course
1992 * only validate the FIRST fragment so the bit must be set.
1994 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1995 (TAILQ_EMPTY(&asoc->resetHead)) &&
1996 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1997 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1998 /* The incoming sseq is behind where we last delivered? */
1999 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2000 mid, asoc->strmin[sid].last_mid_delivered);
2002 if (asoc->idata_supported) {
2003 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2004 asoc->strmin[sid].last_mid_delivered,
2009 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2010 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2015 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2016 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2017 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* --- Detach the payload: copy it unless this is the packet's last
 * chunk, in which case the mbuf chain can be stolen and trimmed. --- */
2021 if (chk_type == SCTP_IDATA) {
2022 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2024 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2026 if (last_chunk == 0) {
2027 if (chk_type == SCTP_IDATA) {
2028 dmbuf = SCTP_M_COPYM(*m,
2029 (offset + sizeof(struct sctp_idata_chunk)),
2032 dmbuf = SCTP_M_COPYM(*m,
2033 (offset + sizeof(struct sctp_data_chunk)),
2036 #ifdef SCTP_MBUF_LOGGING
2037 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2038 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2042 /* We can steal the last chunk */
2046 /* lop off the top part */
2047 if (chk_type == SCTP_IDATA) {
2048 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2050 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2052 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2053 l_len = SCTP_BUF_LEN(dmbuf);
2056 * need to count up the size hopefully does not hit
2062 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2063 l_len += SCTP_BUF_LEN(lat);
2066 if (l_len > the_len) {
2067 /* Trim the end round bytes off too */
2068 m_adj(dmbuf, -(l_len - the_len));
2071 if (dmbuf == NULL) {
2072 SCTP_STAT_INCR(sctps_nomem);
2076 * Now no matter what, we need a control, get one if we don't have
2077 * one (we may have gotten it above when we found the message was
2080 if (control == NULL) {
2081 sctp_alloc_a_readq(stcb, control);
2082 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2087 if (control == NULL) {
2088 SCTP_STAT_INCR(sctps_nomem);
/* Unfragmented message: the control is immediately complete. */
2091 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2094 control->data = dmbuf;
2095 for (mm = control->data; mm; mm = mm->m_next) {
2096 control->length += SCTP_BUF_LEN(mm);
2098 control->tail_mbuf = NULL;
2099 control->end_added = 1;
2100 control->last_frag_seen = 1;
2101 control->first_frag_seen = 1;
2102 control->fsn_included = fsn;
2103 control->top_fsn = fsn;
2105 created_control = 1;
2107 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2108 chk_flags, ordered, mid, control);
/* --- Express delivery: complete, no reset pending, and (unordered, or
 * next-in-order with an empty stream queue). --- */
2109 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2110 TAILQ_EMPTY(&asoc->resetHead) &&
2112 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2113 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2114 /* Candidate for express delivery */
2116 * Its not fragmented, No PD-API is up, Nothing in the
2117 * delivery queue, Its un-ordered OR ordered and the next to
2118 * deliver AND nothing else is stuck on the stream queue,
2119 * And there is room for it in the socket buffer. Lets just
2120 * stuff it up the buffer....
2122 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2123 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2124 asoc->highest_tsn_inside_nr_map = tsn;
2126 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2129 sctp_add_to_readq(stcb->sctp_ep, stcb,
2130 control, &stcb->sctp_socket->so_rcv,
2131 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2133 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2134 /* for ordered, bump what we delivered */
2135 asoc->strmin[sid].last_mid_delivered++;
2137 SCTP_STAT_INCR(sctps_recvexpress);
2138 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2139 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2140 SCTP_STR_LOG_FROM_EXPRS_DEL);
2143 goto finish_express_del;
2146 /* Now will we need a chunk too? */
2147 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2148 sctp_alloc_a_chunk(stcb, chk);
2150 /* No memory so we drop the chunk */
2151 SCTP_STAT_INCR(sctps_nomem);
2152 if (last_chunk == 0) {
2153 /* we copied it, free the copy */
2154 sctp_m_freem(dmbuf);
/* Populate the transmit-chunk record used by the reassembly code. */
2158 chk->rec.data.tsn = tsn;
2159 chk->no_fr_allowed = 0;
2160 chk->rec.data.fsn = fsn;
2161 chk->rec.data.mid = mid;
2162 chk->rec.data.sid = sid;
2163 chk->rec.data.ppid = ppid;
2164 chk->rec.data.context = stcb->asoc.context;
2165 chk->rec.data.doing_fast_retransmit = 0;
2166 chk->rec.data.rcv_flags = chk_flags;
2168 chk->send_size = the_len;
2170 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2173 atomic_add_int(&net->ref_count, 1);
2176 /* Set the appropriate TSN mark */
/* With draining disabled the TSN is non-renegable (nr map); otherwise
 * it stays revokable in the regular map. */
2177 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2178 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2179 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2180 asoc->highest_tsn_inside_nr_map = tsn;
2183 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2184 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2185 asoc->highest_tsn_inside_map = tsn;
2188 /* Now is it complete (i.e. not fragmented)? */
2189 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2191 * Special check for when streams are resetting. We could be
2192 * more smart about this and check the actual stream to see
2193 * if it is not being reset.. that way we would not create a
2194 * HOLB when amongst streams being reset and those not being
2198 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2199 SCTP_TSN_GT(tsn, liste->tsn)) {
2201 * yep its past where we need to reset... go ahead
/* Park the message on the pending-reply queue (kept TSN-sorted) until
 * the stream reset completes. */
2204 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2206 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2208 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2209 unsigned char inserted = 0;
2211 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2212 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2217 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2222 if (inserted == 0) {
2224 * must be put at end, use prevP
2225 * (all setup from loop) to setup
2228 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2231 goto finish_express_del;
2233 if (chk_flags & SCTP_DATA_UNORDERED) {
2234 /* queue directly into socket buffer */
2235 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2237 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2238 sctp_add_to_readq(stcb->sctp_ep, stcb,
2240 &stcb->sctp_socket->so_rcv, 1,
2241 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2244 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2246 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2254 goto finish_express_del;
2256 /* If we reach here its a reassembly */
2257 need_reasm_check = 1;
2258 SCTPDBG(SCTP_DEBUG_XXX,
2259 "Queue data to stream for reasm control: %p MID: %u\n",
2261 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2264 * the assoc is now gone and chk was put onto the reasm
2265 * queue, which has all been freed.
/* --- finish_express_del: common post-acceptance bookkeeping. --- */
2273 /* Here we tidy up things */
2274 if (tsn == (asoc->cumulative_tsn + 1)) {
2275 /* Update cum-ack */
2276 asoc->cumulative_tsn = tsn;
2282 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2284 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2286 SCTP_STAT_INCR(sctps_recvdata);
2287 /* Set it present please */
2288 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2289 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2291 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2292 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2293 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2295 if (need_reasm_check) {
2296 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2297 need_reasm_check = 0;
2299 /* check the special flag for stream resets */
2300 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2301 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2303 * we have finished working through the backlogged TSN's now
2304 * time to reset streams. 1: call reset function. 2: free
2305 * pending_reply space 3: distribute any chunks in
2306 * pending_reply_queue.
2308 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2309 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2310 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2311 SCTP_FREE(liste, SCTP_M_STRESET);
2312 /* sa_ignore FREED_MEMORY */
2313 liste = TAILQ_FIRST(&asoc->resetHead);
2314 if (TAILQ_EMPTY(&asoc->resetHead)) {
2315 /* All can be removed */
2316 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2317 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2318 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2322 if (need_reasm_check) {
2323 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2324 need_reasm_check = 0;
/* Another reset is still pending: only release messages at or below
 * the next reset point. */
2328 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2329 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2333 * if control->sinfo_tsn is <= liste->tsn we
2334 * can process it which is the NOT of
2335 * control->sinfo_tsn > liste->tsn
2337 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2338 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2342 if (need_reasm_check) {
2343 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2344 need_reasm_check = 0;
/*
 * Lookup table indexed by one byte of the (nr_)mapping_array: entry [v]
 * is the number of consecutive 1-bits in v starting at bit 0 (the least
 * significant bit).  sctp_slide_mapping_arrays() uses it, on the first
 * byte that contains a 0 bit, to locate the first missing TSN: 0 means
 * bit 0 is already clear, 8 (index 255 only) means the whole byte is set.
 */
2352 static const int8_t sctp_map_lookup_tab[256] = {
2353 0, 1, 0, 2, 0, 1, 0, 3,
2354 0, 1, 0, 2, 0, 1, 0, 4,
2355 0, 1, 0, 2, 0, 1, 0, 3,
2356 0, 1, 0, 2, 0, 1, 0, 5,
2357 0, 1, 0, 2, 0, 1, 0, 3,
2358 0, 1, 0, 2, 0, 1, 0, 4,
2359 0, 1, 0, 2, 0, 1, 0, 3,
2360 0, 1, 0, 2, 0, 1, 0, 6,
2361 0, 1, 0, 2, 0, 1, 0, 3,
2362 0, 1, 0, 2, 0, 1, 0, 4,
2363 0, 1, 0, 2, 0, 1, 0, 3,
2364 0, 1, 0, 2, 0, 1, 0, 5,
2365 0, 1, 0, 2, 0, 1, 0, 3,
2366 0, 1, 0, 2, 0, 1, 0, 4,
2367 0, 1, 0, 2, 0, 1, 0, 3,
2368 0, 1, 0, 2, 0, 1, 0, 7,
2369 0, 1, 0, 2, 0, 1, 0, 3,
2370 0, 1, 0, 2, 0, 1, 0, 4,
2371 0, 1, 0, 2, 0, 1, 0, 3,
2372 0, 1, 0, 2, 0, 1, 0, 5,
2373 0, 1, 0, 2, 0, 1, 0, 3,
2374 0, 1, 0, 2, 0, 1, 0, 4,
2375 0, 1, 0, 2, 0, 1, 0, 3,
2376 0, 1, 0, 2, 0, 1, 0, 6,
2377 0, 1, 0, 2, 0, 1, 0, 3,
2378 0, 1, 0, 2, 0, 1, 0, 4,
2379 0, 1, 0, 2, 0, 1, 0, 3,
2380 0, 1, 0, 2, 0, 1, 0, 5,
2381 0, 1, 0, 2, 0, 1, 0, 3,
2382 0, 1, 0, 2, 0, 1, 0, 4,
2383 0, 1, 0, 2, 0, 1, 0, 3,
2384 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Advance the association's cumulative TSN from the OR of the two TSN
 * bit maps (mapping_array | nr_mapping_array) and, when at least one
 * whole byte has been consumed, either clear the maps outright (all
 * outstanding TSNs acked) or slide their live bytes down to the front
 * and bump mapping_array_base_tsn accordingly.
 * NOTE(review): several original lines (returns, braces, else arms)
 * are elided from this view; comments describe only what is visible.
 */
2389 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2392 * Now we also need to check the mapping array in a couple of ways.
2393 * 1) Did we move the cum-ack point?
2395 * When you first glance at this you might think that all entries
2396 * that make up the position of the cum-ack would be in the
2397 * nr-mapping array only.. i.e. things up to the cum-ack are always
2398 * deliverable. Thats true with one exception, when its a fragmented
2399 * message we may not deliver the data until some threshold (or all
2400 * of it) is in place. So we must OR the nr_mapping_array and
2401 * mapping_array to get a true picture of the cum-ack.
2403 struct sctp_association *asoc;
2406 int slide_from, slide_end, lgap, distance;
2407 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Remember the pre-slide state for the MAP_LOGGING records below. */
2411 old_cumack = asoc->cumulative_tsn;
2412 old_base = asoc->mapping_array_base_tsn;
2413 old_highest = asoc->highest_tsn_inside_map;
2415 * We could probably improve this a small bit by calculating the
2416 * offset of the current cum-ack as the starting point.
/*
 * Scan for the first byte with a 0 bit; sctp_map_lookup_tab counts the
 * trailing 1-bits within that byte, so 'at' ends up as the number of
 * consecutively received TSNs from the array base.
 */
2419 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2420 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2424 /* there is a 0 bit */
2425 at += sctp_map_lookup_tab[val];
2429 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: the new cum-ack must not exceed both highest-TSN trackers. */
2431 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2432 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2434 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2435 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2437 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2438 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2439 sctp_print_mapping_array(asoc);
2440 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2441 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery: clamp the trackers to the computed cum-ack. */
2443 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2444 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2447 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2448 highest_tsn = asoc->highest_tsn_inside_nr_map;
2450 highest_tsn = asoc->highest_tsn_inside_map;
/* Everything acked and >= one full byte consumed: wipe the maps. */
2452 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2453 /* The complete array was completed by a single FR */
2454 /* highest becomes the cum-ack */
2460 /* clear the array */
2461 clr = ((at + 7) >> 3);
2462 if (clr > asoc->mapping_array_size) {
2463 clr = asoc->mapping_array_size;
2465 memset(asoc->mapping_array, 0, clr);
2466 memset(asoc->nr_mapping_array, 0, clr);
/* Debug-style verification that the maps really are clean now. */
2468 for (i = 0; i < asoc->mapping_array_size; i++) {
2469 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2470 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2471 sctp_print_mapping_array(asoc);
2475 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2476 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2477 } else if (at >= 8) {
2478 /* we can slide the mapping array down */
2479 /* slide_from holds where we hit the first NON 0xff byte */
2482 * now calculate the ceiling of the move using our highest
/* slide_end = byte index holding the highest TSN still tracked. */
2485 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2486 slide_end = (lgap >> 3);
2487 if (slide_end < slide_from) {
2488 sctp_print_mapping_array(asoc);
2490 panic("impossible slide");
2492 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2493 lgap, slide_end, slide_from, at);
2497 if (slide_end > asoc->mapping_array_size) {
2499 panic("would overrun buffer");
2501 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2502 asoc->mapping_array_size, slide_end);
2503 slide_end = asoc->mapping_array_size;
2506 distance = (slide_end - slide_from) + 1;
2507 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2508 sctp_log_map(old_base, old_cumack, old_highest,
2509 SCTP_MAP_PREPARE_SLIDE);
2510 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2511 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2513 if (distance + slide_from > asoc->mapping_array_size ||
2516 * Here we do NOT slide forward the array so that
2517 * hopefully when more data comes in to fill it up
2518 * we will be able to slide it forward. Really I
2519 * don't think this should happen :-0
2522 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2523 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2524 (uint32_t)asoc->mapping_array_size,
2525 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes to the front, then zero the vacated tail. */
2530 for (ii = 0; ii < distance; ii++) {
2531 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2532 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2535 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2536 asoc->mapping_array[ii] = 0;
2537 asoc->nr_mapping_array[ii] = 0;
/*
 * If a highest-TSN tracker sat exactly one below the old base (i.e.
 * "nothing tracked"), move it along with the base so the invariant
 * holds after the slide.
 */
2539 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2540 asoc->highest_tsn_inside_map += (slide_from << 3);
2542 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2543 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2545 asoc->mapping_array_base_tsn += (slide_from << 3);
2546 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2547 sctp_log_map(asoc->mapping_array_base_tsn,
2548 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2549 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to send a SACK now or (re)arm the delayed-ACK timer.
 * First slides the mapping arrays, then recomputes whether a gap still
 * exists.  Special case: in SHUTDOWN-SENT state the delayed-ACK timer
 * is stopped and a SHUTDOWN plus an immediate SACK are sent instead.
 * was_a_gap: non-zero if a gap existed before the just-processed data.
 * NOTE(review): some original lines are elided from this view.
 */
2556 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2558 struct sctp_association *asoc;
2559 uint32_t highest_tsn;
2562 sctp_slide_mapping_arrays(stcb);
/* highest_tsn = max of the renegable and non-renegable trackers. */
2564 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2565 highest_tsn = asoc->highest_tsn_inside_nr_map;
2567 highest_tsn = asoc->highest_tsn_inside_map;
2569 /* Is there a gap now? */
2570 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2573 * Now we need to see if we need to queue a sack or just start the
2574 * timer (if allowed).
2576 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2578 * Ok special case, in SHUTDOWN-SENT case. here we maker
2579 * sure SACK timer is off and instead send a SHUTDOWN and a
2582 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2583 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2584 stcb->sctp_ep, stcb, NULL,
2585 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* SHUTDOWN goes to the alternate path if one is set. */
2587 sctp_send_shutdown(stcb,
2588 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2590 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2594 * CMT DAC algorithm: increase number of packets received
2597 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any of these conditions forces a SACK (or DAC-delayed ack below). */
2599 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2601 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2603 (stcb->asoc.numduptsns) || /* we have dup's */
2604 (is_a_gap) || /* is still a gap */
2605 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2606 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2609 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2610 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2611 (stcb->asoc.send_sack == 0) &&
2612 (stcb->asoc.numduptsns == 0) &&
2613 (stcb->asoc.delayed_ack) &&
2614 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2617 * CMT DAC algorithm: With CMT, delay acks
2618 * even in the face of
2620 * reordering. Therefore, if acks that do
2621 * not have to be sent because of the above
2622 * reasons, will be delayed. That is, acks
2623 * that would have been sent due to gap
2624 * reports will be delayed with DAC. Start
2625 * the delayed ack timer.
2627 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2628 stcb->sctp_ep, stcb, NULL);
2631 * Ok we must build a SACK since the timer
2632 * is pending, we got our first packet OR
2633 * there are gaps or duplicates.
2635 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2636 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Nothing forces a SACK: just make sure the delayed-ACK timer runs. */
2639 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2640 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2641 stcb->sctp_ep, stcb, NULL);
/*
 * Process the DATA region of one received packet: walk every chunk from
 * *offset to length, handing DATA / I-DATA chunks to
 * sctp_process_a_data_chunk().  Aborts the association on protocol
 * violations: a DATA vs I-DATA type that contradicts the negotiated
 * idata_supported flag, or a chunk shorter than its minimum header.
 * Known control chunk types seen after data are listed explicitly
 * (visible handling elided); unknown types follow the standard
 * upper-two-bit rules (0x40: queue an UNRECOG_CHUNK error report;
 * 0x80 clear: stop processing the rest of the packet).  On the way out
 * it reports rwnd-overrun drops, updates liveness state and calls
 * sctp_sack_check().
 * NOTE(review): many original lines (returns, braces, else arms) are
 * elided from this view; comments describe only what is visible.
 */
2648 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2649 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2650 struct sctp_nets *net, uint32_t *high_tsn)
2652 struct sctp_chunkhdr *ch, chunk_buf;
2653 struct sctp_association *asoc;
2654 int num_chunks = 0; /* number of control chunks processed */
2656 int break_flag, last_chunk;
2657 int abort_flag = 0, was_a_gap;
2659 uint32_t highest_tsn;
2660 uint16_t chk_length;
2663 sctp_set_rwnd(stcb, &stcb->asoc);
2666 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed before this packet's data. */
2668 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2669 highest_tsn = asoc->highest_tsn_inside_nr_map;
2671 highest_tsn = asoc->highest_tsn_inside_map;
2673 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2675 * setup where we got the last DATA packet from for any SACK that
2676 * may need to go out. Don't bump the net. This is done ONLY when a
2677 * chunk is assigned.
2679 asoc->last_data_chunk_from = net;
2682 * Now before we proceed we must figure out if this is a wasted
2683 * cluster... i.e. it is a small packet sent in and yet the driver
2684 * underneath allocated a full cluster for it. If so we must copy it
2685 * to a smaller mbuf and free up the cluster mbuf. This will help
2686 * with cluster starvation. Note for __Panda__ we don't do this
2687 * since it has clusters all the way down to 64 bytes.
2689 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2690 /* we only handle mbufs that are singletons.. not chains */
2691 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2693 /* ok lets see if we can copy the data up */
2696 /* get the pointers and copy */
2697 to = mtod(m, caddr_t *);
2698 from = mtod((*mm), caddr_t *);
2699 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2700 /* copy the length and free up the old */
2701 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2703 /* success, back copy */
2706 /* We are in trouble in the mbuf world .. yikes */
2710 /* get pointer to the first chunk header */
2711 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2712 sizeof(struct sctp_chunkhdr),
2713 (uint8_t *)&chunk_buf);
2718 * process all DATA chunks...
2720 *high_tsn = asoc->cumulative_tsn;
2722 asoc->data_pkts_seen++;
2723 while (stop_proc == 0) {
2724 /* validate chunk length */
2725 chk_length = ntohs(ch->chunk_length);
2726 if (length - *offset < chk_length) {
2727 /* all done, mutulated chunk */
/* I-DATA chunk arriving when plain DATA was negotiated: abort. */
2731 if ((asoc->idata_supported == 1) &&
2732 (ch->chunk_type == SCTP_DATA)) {
2733 struct mbuf *op_err;
2734 char msg[SCTP_DIAG_INFO_LEN];
2736 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2737 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2738 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2739 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Converse mismatch: DATA when I-DATA was negotiated: abort. */
2742 if ((asoc->idata_supported == 0) &&
2743 (ch->chunk_type == SCTP_IDATA)) {
2744 struct mbuf *op_err;
2745 char msg[SCTP_DIAG_INFO_LEN];
2747 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2748 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2749 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2750 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2753 if ((ch->chunk_type == SCTP_DATA) ||
2754 (ch->chunk_type == SCTP_IDATA)) {
2757 if (ch->chunk_type == SCTP_DATA) {
2758 clen = sizeof(struct sctp_data_chunk);
2760 clen = sizeof(struct sctp_idata_chunk);
2762 if (chk_length < clen) {
2764 * Need to send an abort since we had a
2765 * invalid data chunk.
2767 struct mbuf *op_err;
2768 char msg[SCTP_DIAG_INFO_LEN];
2770 snprintf(msg, sizeof(msg), "%s chunk of length %u",
2771 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2773 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2774 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2775 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2778 #ifdef SCTP_AUDITING_ENABLED
2779 sctp_audit_log(0xB1, 0);
/* last_chunk: this chunk's padded length reaches end of packet. */
2781 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2786 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2787 chk_length, net, high_tsn, &abort_flag, &break_flag,
2788 last_chunk, ch->chunk_type)) {
2796 * Set because of out of rwnd space and no
2797 * drop rep space left.
2803 /* not a data chunk in the data region */
2804 switch (ch->chunk_type) {
2805 case SCTP_INITIATION:
2806 case SCTP_INITIATION_ACK:
2807 case SCTP_SELECTIVE_ACK:
2808 case SCTP_NR_SELECTIVE_ACK:
2809 case SCTP_HEARTBEAT_REQUEST:
2810 case SCTP_HEARTBEAT_ACK:
2811 case SCTP_ABORT_ASSOCIATION:
2813 case SCTP_SHUTDOWN_ACK:
2814 case SCTP_OPERATION_ERROR:
2815 case SCTP_COOKIE_ECHO:
2816 case SCTP_COOKIE_ACK:
2819 case SCTP_SHUTDOWN_COMPLETE:
2820 case SCTP_AUTHENTICATION:
2821 case SCTP_ASCONF_ACK:
2822 case SCTP_PACKET_DROPPED:
2823 case SCTP_STREAM_RESET:
2824 case SCTP_FORWARD_CUM_TSN:
2828 * Now, what do we do with KNOWN
2829 * chunks that are NOT in the right
2832 * For now, I do nothing but ignore
2833 * them. We may later want to add
2834 * sysctl stuff to switch out and do
2835 * either an ABORT() or possibly
2838 struct mbuf *op_err;
2839 char msg[SCTP_DIAG_INFO_LEN];
2841 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2843 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2844 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2849 * Unknown chunk type: use bit rules after
2852 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2854 * Need to send an abort since we
2855 * had a invalid chunk.
2857 struct mbuf *op_err;
2858 char msg[SCTP_DIAG_INFO_LEN];
2860 snprintf(msg, sizeof(msg), "Chunk of length %u",
2862 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2863 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2864 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2867 if (ch->chunk_type & 0x40) {
2868 /* Add a error report to the queue */
2869 struct mbuf *op_err;
2870 struct sctp_gen_error_cause *cause;
2872 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2873 0, M_NOWAIT, 1, MT_DATA);
2874 if (op_err != NULL) {
2875 cause = mtod(op_err, struct sctp_gen_error_cause *);
2876 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2877 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2878 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
/* Copy of the offending chunk is appended to the error cause. */
2879 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2880 if (SCTP_BUF_NEXT(op_err) != NULL) {
2881 sctp_queue_op_err(stcb, op_err);
2883 sctp_m_freem(op_err);
2887 if ((ch->chunk_type & 0x80) == 0) {
2888 /* discard the rest of this packet */
2890 } /* else skip this bad chunk and
2893 } /* switch of chunk type */
/* Advance to the next chunk (lengths are padded to 4 bytes). */
2895 *offset += SCTP_SIZE32(chk_length);
2896 if ((*offset >= length) || stop_proc) {
2897 /* no more data left in the mbuf chain */
2901 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2902 sizeof(struct sctp_chunkhdr),
2903 (uint8_t *)&chunk_buf);
2912 * we need to report rwnd overrun drops.
2914 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2918 * Did we get data, if so update the time for auto-close and
2919 * give peer credit for being alive.
2921 SCTP_STAT_INCR(sctps_recvpktwithdata);
2922 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2923 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2924 stcb->asoc.overall_error_count,
2926 SCTP_FROM_SCTP_INDATA,
2929 stcb->asoc.overall_error_count = 0;
2930 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2932 /* now service all of the reassm queue if needed */
2933 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2934 /* Assure that we ack right away */
2935 stcb->asoc.send_sack = 1;
2937 /* Start a sack timer or QUEUE a SACK for sending */
2938 sctp_sack_check(stcb, was_a_gap);
/*
 * Process one SACK gap-ack block covering TSNs (last_tsn + frag_strt)
 * .. (last_tsn + frag_end) against the sent queue: mark newly acked
 * chunks, decrease flight size, maintain the CMT CUCv2
 * (rtx-)pseudo-cumack trackers and SFR/HTNA state, take an RTO sample
 * where permitted, and — when nr_sacking — free the chunk's data and
 * mark it SCTP_DATAGRAM_NR_ACKED.  *p_tp1 carries the scan position
 * across calls so in-order blocks resume where the previous block
 * stopped; 'circled' allows one wrap back to the queue head.  Returns
 * wake_him (non-zero if data was freed), used only for nr-sack.
 * NOTE(review): several original lines are elided from this view;
 * comments describe only what is visible.
 */
2943 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2944 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2946 uint32_t *biggest_newly_acked_tsn,
2947 uint32_t *this_sack_lowest_newack,
2950 struct sctp_tmit_chunk *tp1;
2951 unsigned int theTSN;
2952 int j, wake_him = 0, circled = 0;
2954 /* Recover the tp1 we last saw */
2957 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Walk every TSN named by this gap block. */
2959 for (j = frag_strt; j <= frag_end; j++) {
2960 theTSN = j + last_tsn;
2962 if (tp1->rec.data.doing_fast_retransmit)
2966 * CMT: CUCv2 algorithm. For each TSN being
2967 * processed from the sent queue, track the
2968 * next expected pseudo-cumack, or
2969 * rtx_pseudo_cumack, if required. Separate
2970 * cumack trackers for first transmissions,
2971 * and retransmissions.
2973 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2974 (tp1->whoTo->find_pseudo_cumack == 1) &&
2975 (tp1->snd_count == 1)) {
2976 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2977 tp1->whoTo->find_pseudo_cumack = 0;
2979 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2980 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2981 (tp1->snd_count > 1)) {
2982 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2983 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the sent-queue entry matching this acked TSN. */
2985 if (tp1->rec.data.tsn == theTSN) {
2986 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2988 * must be held until
2991 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2993 * If it is less than RESEND, it is
2994 * now no-longer in flight.
2995 * Higher values may already be set
2996 * via previous Gap Ack Blocks...
2997 * i.e. ACKED or RESEND.
2999 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3000 *biggest_newly_acked_tsn)) {
3001 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3004 * CMT: SFR algo (and HTNA) - set
3005 * saw_newack to 1 for dest being
3006 * newly acked. update
3007 * this_sack_highest_newack if
3010 if (tp1->rec.data.chunk_was_revoked == 0)
3011 tp1->whoTo->saw_newack = 1;
3013 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3014 tp1->whoTo->this_sack_highest_newack)) {
3015 tp1->whoTo->this_sack_highest_newack =
3019 * CMT DAC algo: also update
3020 * this_sack_lowest_newack
3022 if (*this_sack_lowest_newack == 0) {
3023 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3024 sctp_log_sack(*this_sack_lowest_newack,
3029 SCTP_LOG_TSN_ACKED);
3031 *this_sack_lowest_newack = tp1->rec.data.tsn;
3034 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3035 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3036 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3037 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3038 * Separate pseudo_cumack trackers for first transmissions and
3041 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3042 if (tp1->rec.data.chunk_was_revoked == 0) {
3043 tp1->whoTo->new_pseudo_cumack = 1;
3045 tp1->whoTo->find_pseudo_cumack = 1;
3047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3048 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3050 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3051 if (tp1->rec.data.chunk_was_revoked == 0) {
3052 tp1->whoTo->new_pseudo_cumack = 1;
3054 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3056 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3057 sctp_log_sack(*biggest_newly_acked_tsn,
3062 SCTP_LOG_TSN_ACKED);
3064 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3065 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3066 tp1->whoTo->flight_size,
3068 (uint32_t)(uintptr_t)tp1->whoTo,
/* Chunk leaves flight: update per-net and total counters. */
3071 sctp_flight_size_decrease(tp1);
3072 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3073 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3076 sctp_total_flight_decrease(stcb, tp1);
3078 tp1->whoTo->net_ack += tp1->send_size;
3079 if (tp1->snd_count < 2) {
3081 * True non-retransmitted chunk
3083 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample from the chunk's send timestamp (conditions elided). */
3091 sctp_calculate_rto(stcb,
3094 &tp1->sent_rcv_time,
3095 SCTP_RTT_FROM_DATA);
3098 if (tp1->whoTo->rto_needed == 0) {
3099 tp1->whoTo->rto_needed = 1;
3106 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3107 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3108 stcb->asoc.this_sack_highest_gap)) {
3109 stcb->asoc.this_sack_highest_gap =
3112 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3113 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3114 #ifdef SCTP_AUDITING_ENABLED
3115 sctp_audit_log(0xB2,
3116 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3121 * All chunks NOT UNSENT fall through here and are marked
3122 * (leave PR-SCTP ones that are to skip alone though)
3124 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3125 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3126 tp1->sent = SCTP_DATAGRAM_MARKED;
3128 if (tp1->rec.data.chunk_was_revoked) {
3129 /* deflate the cwnd */
3130 tp1->whoTo->cwnd -= tp1->book_size;
3131 tp1->rec.data.chunk_was_revoked = 0;
3133 /* NR Sack code here */
3135 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* NR-acked: release stream accounting and free the chunk data. */
3136 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3137 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3140 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3143 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3144 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3145 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3146 stcb->asoc.trigger_reset = 1;
3148 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3154 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3155 sctp_m_freem(tp1->data);
3162 } /* if (tp1->tsn == theTSN) */
3163 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3166 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* One wrap back to the head is allowed per gap block. */
3167 if ((tp1 == NULL) && (circled == 0)) {
3169 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3171 } /* end while (tp1) */
3174 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3176 /* In case the fragments were not in order we must reset */
3177 } /* end for (j = fragStart */
3179 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over the num_seg + num_nr_seg gap-ack blocks of a SACK /
 * NR-SACK chunk, reading each block from mbuf m at *offset and handing
 * it to sctp_process_segment_range().  Malformed blocks (start > end)
 * are skipped; out-of-order blocks restart the sent-queue scan from the
 * head.  *biggest_tsn_acked tracks the highest TSN covered by any
 * block.  Returns chunk_freed (non-zero if any chunk's data was freed).
 * NOTE(review): some original lines are elided from this view.
 */
3184 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3185 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3186 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3187 int num_seg, int num_nr_seg, int *rto_ok)
3189 struct sctp_gap_ack_block *frag, block;
3190 struct sctp_tmit_chunk *tp1;
3195 uint16_t frag_strt, frag_end, prev_frag_end;
3197 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3201 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3204 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap block out of the mbuf chain (copy via 'block'). */
3206 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3207 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3208 *offset += sizeof(block);
3210 return (chunk_freed);
3212 frag_strt = ntohs(frag->start);
3213 frag_end = ntohs(frag->end);
3215 if (frag_strt > frag_end) {
3216 /* This gap report is malformed, skip it. */
3219 if (frag_strt <= prev_frag_end) {
3220 /* This gap report is not in order, so restart. */
3221 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3223 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3224 *biggest_tsn_acked = last_tsn + frag_end;
3231 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3232 non_revocable, &num_frs, biggest_newly_acked_tsn,
3233 this_sack_lowest_newack, rto_ok)) {
3236 prev_frag_end = frag_end;
3238 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3240 sctp_log_fr(*biggest_tsn_acked,
3241 *biggest_newly_acked_tsn,
3242 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3244 return (chunk_freed);
/*
 * After a SACK is processed, walk the sent queue between cumack and
 * biggest_tsn_acked looking for revoked chunks: a chunk previously
 * ACKED but not re-acked by this SACK is put back to SENT, has
 * chunk_was_revoked set, its flight size re-added and the path cwnd
 * inflated to compensate; a chunk MARKED by this SACK is promoted to
 * ACKED.  Chunks above biggest_tsn_acked are outside this SACK's view.
 * NOTE(review): some original lines (loop-exit breaks, closing braces)
 * are elided from this view.
 */
3248 sctp_check_for_revoked(struct sctp_tcb *stcb,
3249 struct sctp_association *asoc, uint32_t cumack,
3250 uint32_t biggest_tsn_acked)
3252 struct sctp_tmit_chunk *tp1;
3254 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3255 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3257 * ok this guy is either ACK or MARKED. If it is
3258 * ACKED it has been previously acked but not this
3259 * time i.e. revoked. If it is MARKED it was ACK'ed
3262 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3265 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3266 /* it has been revoked */
3267 tp1->sent = SCTP_DATAGRAM_SENT;
3268 tp1->rec.data.chunk_was_revoked = 1;
3270 * We must add this stuff back in to assure
3271 * timers and such get started.
3273 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3274 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3275 tp1->whoTo->flight_size,
3277 (uint32_t)(uintptr_t)tp1->whoTo,
3280 sctp_flight_size_increase(tp1);
3281 sctp_total_flight_increase(stcb, tp1);
3283 * We inflate the cwnd to compensate for our
3284 * artificial inflation of the flight_size.
3286 tp1->whoTo->cwnd += tp1->book_size;
3287 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3288 sctp_log_sack(asoc->last_acked_seq,
3293 SCTP_LOG_TSN_REVOKED);
3295 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3296 /* it has been re-acked in this SACK */
3297 tp1->sent = SCTP_DATAGRAM_ACKED;
3300 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3307 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3308 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3310 struct sctp_tmit_chunk *tp1;
3311 int strike_flag = 0;
3313 int tot_retrans = 0;
3314 uint32_t sending_seq;
3315 struct sctp_nets *net;
3316 int num_dests_sacked = 0;
3319 * select the sending_seq, this is either the next thing ready to be
3320 * sent but not transmitted, OR, the next seq we assign.
3322 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3324 sending_seq = asoc->sending_seq;
3326 sending_seq = tp1->rec.data.tsn;
3329 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3330 if ((asoc->sctp_cmt_on_off > 0) &&
3331 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3332 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3333 if (net->saw_newack)
3337 if (stcb->asoc.prsctp_supported) {
3338 (void)SCTP_GETTIME_TIMEVAL(&now);
3340 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3342 if (tp1->no_fr_allowed) {
3343 /* this one had a timeout or something */
3346 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3347 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3348 sctp_log_fr(biggest_tsn_newly_acked,
3351 SCTP_FR_LOG_CHECK_STRIKE);
3353 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3354 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3358 if (stcb->asoc.prsctp_supported) {
3359 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3360 /* Is it expired? */
3361 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3362 /* Yes so drop it */
3363 if (tp1->data != NULL) {
3364 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3365 SCTP_SO_NOT_LOCKED);
3372 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3373 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3374 /* we are beyond the tsn in the sack */
3377 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3378 /* either a RESEND, ACKED, or MARKED */
3380 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3381 /* Continue strikin FWD-TSN chunks */
3382 tp1->rec.data.fwd_tsn_cnt++;
3387 * CMT : SFR algo (covers part of DAC and HTNA as well)
3389 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3391 * No new acks were receieved for data sent to this
3392 * dest. Therefore, according to the SFR algo for
3393 * CMT, no data sent to this dest can be marked for
3394 * FR using this SACK.
3397 } else if (tp1->whoTo &&
3398 SCTP_TSN_GT(tp1->rec.data.tsn,
3399 tp1->whoTo->this_sack_highest_newack) &&
3400 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3402 * CMT: New acks were receieved for data sent to
3403 * this dest. But no new acks were seen for data
3404 * sent after tp1. Therefore, according to the SFR
3405 * algo for CMT, tp1 cannot be marked for FR using
3406 * this SACK. This step covers part of the DAC algo
3407 * and the HTNA algo as well.
3412 * Here we check to see if we were have already done a FR
3413 * and if so we see if the biggest TSN we saw in the sack is
3414 * smaller than the recovery point. If so we don't strike
3415 * the tsn... otherwise we CAN strike the TSN.
3418 * @@@ JRI: Check for CMT if (accum_moved &&
3419 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3422 if (accum_moved && asoc->fast_retran_loss_recovery) {
3424 * Strike the TSN if in fast-recovery and cum-ack
3427 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3428 sctp_log_fr(biggest_tsn_newly_acked,
3431 SCTP_FR_LOG_STRIKE_CHUNK);
3433 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3436 if ((asoc->sctp_cmt_on_off > 0) &&
3437 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3439 * CMT DAC algorithm: If SACK flag is set to
3440 * 0, then lowest_newack test will not pass
3441 * because it would have been set to the
3442 * cumack earlier. If not already to be
3443 * rtx'd, If not a mixed sack and if tp1 is
3444 * not between two sacked TSNs, then mark by
3445 * one more. NOTE that we are marking by one
3446 * additional time since the SACK DAC flag
3447 * indicates that two packets have been
3448 * received after this missing TSN.
3450 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3451 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3452 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3453 sctp_log_fr(16 + num_dests_sacked,
3456 SCTP_FR_LOG_STRIKE_CHUNK);
3461 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3462 (asoc->sctp_cmt_on_off == 0)) {
3464 * For those that have done a FR we must take
3465 * special consideration if we strike. I.e the
3466 * biggest_newly_acked must be higher than the
3467 * sending_seq at the time we did the FR.
3470 #ifdef SCTP_FR_TO_ALTERNATE
3472 * If FR's go to new networks, then we must only do
3473 * this for singly homed asoc's. However if the FR's
3474 * go to the same network (Armando's work) then its
3475 * ok to FR multiple times.
3483 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3484 tp1->rec.data.fast_retran_tsn)) {
3486 * Strike the TSN, since this ack is
3487 * beyond where things were when we
3490 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3491 sctp_log_fr(biggest_tsn_newly_acked,
3494 SCTP_FR_LOG_STRIKE_CHUNK);
3496 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3500 if ((asoc->sctp_cmt_on_off > 0) &&
3501 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3503 * CMT DAC algorithm: If
3504 * SACK flag is set to 0,
3505 * then lowest_newack test
3506 * will not pass because it
3507 * would have been set to
3508 * the cumack earlier. If
3509 * not already to be rtx'd,
3510 * If not a mixed sack and
3511 * if tp1 is not between two
3512 * sacked TSNs, then mark by
3513 * one more. NOTE that we
3514 * are marking by one
3515 * additional time since the
3516 * SACK DAC flag indicates
3517 * that two packets have
3518 * been received after this
3521 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3522 (num_dests_sacked == 1) &&
3523 SCTP_TSN_GT(this_sack_lowest_newack,
3524 tp1->rec.data.tsn)) {
3525 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3526 sctp_log_fr(32 + num_dests_sacked,
3529 SCTP_FR_LOG_STRIKE_CHUNK);
3531 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3539 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3542 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3543 biggest_tsn_newly_acked)) {
3545 * We don't strike these: This is the HTNA
3546 * algorithm i.e. we don't strike If our TSN is
3547 * larger than the Highest TSN Newly Acked.
3551 /* Strike the TSN */
3552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3553 sctp_log_fr(biggest_tsn_newly_acked,
3556 SCTP_FR_LOG_STRIKE_CHUNK);
3558 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3561 if ((asoc->sctp_cmt_on_off > 0) &&
3562 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3564 * CMT DAC algorithm: If SACK flag is set to
3565 * 0, then lowest_newack test will not pass
3566 * because it would have been set to the
3567 * cumack earlier. If not already to be
3568 * rtx'd, If not a mixed sack and if tp1 is
3569 * not between two sacked TSNs, then mark by
3570 * one more. NOTE that we are marking by one
3571 * additional time since the SACK DAC flag
3572 * indicates that two packets have been
3573 * received after this missing TSN.
3575 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3576 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3577 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3578 sctp_log_fr(48 + num_dests_sacked,
3581 SCTP_FR_LOG_STRIKE_CHUNK);
3587 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3588 struct sctp_nets *alt;
3590 /* fix counts and things */
3591 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3592 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3593 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3595 (uint32_t)(uintptr_t)tp1->whoTo,
3599 tp1->whoTo->net_ack++;
3600 sctp_flight_size_decrease(tp1);
3601 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3602 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3608 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3609 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3611 /* add back to the rwnd */
3612 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3614 /* remove from the total flight */
3615 sctp_total_flight_decrease(stcb, tp1);
3617 if ((stcb->asoc.prsctp_supported) &&
3618 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3620 * Has it been retransmitted tv_sec times? -
3621 * we store the retran count there.
3623 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3624 /* Yes, so drop it */
3625 if (tp1->data != NULL) {
3626 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3627 SCTP_SO_NOT_LOCKED);
3629 /* Make sure to flag we had a FR */
3630 if (tp1->whoTo != NULL) {
3631 tp1->whoTo->net_ack++;
3637 * SCTP_PRINTF("OK, we are now ready to FR this
3640 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3641 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3645 /* This is a subsequent FR */
3646 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3648 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3649 if (asoc->sctp_cmt_on_off > 0) {
3651 * CMT: Using RTX_SSTHRESH policy for CMT.
3652 * If CMT is being used, then pick dest with
3653 * largest ssthresh for any retransmission.
3655 tp1->no_fr_allowed = 1;
3657 /* sa_ignore NO_NULL_CHK */
3658 if (asoc->sctp_cmt_pf > 0) {
3660 * JRS 5/18/07 - If CMT PF is on,
3661 * use the PF version of
3664 alt = sctp_find_alternate_net(stcb, alt, 2);
3667 * JRS 5/18/07 - If only CMT is on,
3668 * use the CMT version of
3671 /* sa_ignore NO_NULL_CHK */
3672 alt = sctp_find_alternate_net(stcb, alt, 1);
3678 * CUCv2: If a different dest is picked for
3679 * the retransmission, then new
3680 * (rtx-)pseudo_cumack needs to be tracked
3681 * for orig dest. Let CUCv2 track new (rtx-)
3682 * pseudo-cumack always.
3685 tp1->whoTo->find_pseudo_cumack = 1;
3686 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3689 } else { /* CMT is OFF */
3691 #ifdef SCTP_FR_TO_ALTERNATE
3692 /* Can we find an alternate? */
3693 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3696 * default behavior is to NOT retransmit
3697 * FR's to an alternate. Armando Caro's
3698 * paper details why.
3704 tp1->rec.data.doing_fast_retransmit = 1;
3706 /* mark the sending seq for possible subsequent FR's */
3708 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3709 * (uint32_t)tpi->rec.data.tsn);
3711 if (TAILQ_EMPTY(&asoc->send_queue)) {
3713 * If the queue of send is empty then its
3714 * the next sequence number that will be
3715 * assigned so we subtract one from this to
3716 * get the one we last sent.
3718 tp1->rec.data.fast_retran_tsn = sending_seq;
3721 * If there are chunks on the send queue
3722 * (unsent data that has made it from the
3723 * stream queues but not out the door, we
3724 * take the first one (which will have the
3725 * lowest TSN) and subtract one to get the
3728 struct sctp_tmit_chunk *ttt;
3730 ttt = TAILQ_FIRST(&asoc->send_queue);
3731 tp1->rec.data.fast_retran_tsn =
3737 * this guy had a RTO calculation pending on
3740 if ((tp1->whoTo != NULL) &&
3741 (tp1->whoTo->rto_needed == 0)) {
3742 tp1->whoTo->rto_needed = 1;
3746 if (alt != tp1->whoTo) {
3747 /* yes, there is an alternate. */
3748 sctp_free_remote_addr(tp1->whoTo);
3749 /* sa_ignore FREED_MEMORY */
3751 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP "advance peer ack point" step: walk the sent_queue from the
 * head and move asoc->advanced_peer_ack_point forward over chunks
 * marked FORWARD_TSN_SKIP or NR_ACKED, releasing expired PR-SCTP
 * resends along the way.  Presumably returns the last chunk advanced
 * over (a_adv, NULL if none) so the caller can decide whether to send
 * a FWD-TSN -- the return statement is outside this excerpt; confirm
 * against the full source.
 * NOTE(review): this listing has lines elided (braces, blank lines,
 * comment delimiters); comments describe only the visible code.
 */
3757 struct sctp_tmit_chunk *
3758 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3759 struct sctp_association *asoc)
3761 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to advance unless the peer negotiated PR-SCTP. */
3765 if (asoc->prsctp_supported == 0) {
3768 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3769 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3770 tp1->sent != SCTP_DATAGRAM_RESEND &&
3771 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3772 /* no chance to advance, out of here */
3775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3776 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3777 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3778 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3779 asoc->advanced_peer_ack_point,
3780 tp1->rec.data.tsn, 0, 0);
3783 if (!PR_SCTP_ENABLED(tp1->flags)) {
3785 * We can't fwd-tsn past any that are reliable aka
3786 * retransmitted until the asoc fails.
3791 (void)SCTP_GETTIME_TIMEVAL(&now);
3795 * now we got a chunk which is marked for another
3796 * retransmission to a PR-stream but has run out its chances
3797 * already maybe OR has been marked to skip now. Can we skip
3798 * it if its a resend?
3800 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3801 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3803 * Now is this one marked for resend and its time is
3806 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3807 /* Yes so drop it */
3809 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3810 1, SCTP_SO_NOT_LOCKED);
3814 * No, we are done when hit one for resend
3815 * whose time has not expired.
3821 * Ok now if this chunk is marked to drop it we can clean up
3822 * the chunk, advance our peer ack point and we can check
3825 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3826 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3827 /* advance PeerAckPoint goes forward */
3828 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3829 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3831 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3832 /* No update but we do save the chk */
3837 * If it is still in RESEND we can advance no
/*
 * Debug audit of flight-size accounting: re-count the sent_queue into
 * inflight/resend/inbetween/acked/above buckets and compare against
 * the flight totals captured on entry.  On a mismatch it panics (when
 * invariant checking is compiled in) or logs the discrepancy.
 * Presumably returns non-zero on audit failure so the caller can
 * resynchronize flight state -- the return statements and the
 * storage-class/return-type line fall outside this excerpt.
 * NOTE(review): lines are elided in this listing; comments describe
 * only the visible code.
 */
3847 sctp_fs_audit(struct sctp_association *asoc)
3849 struct sctp_tmit_chunk *chk;
3850 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3853 int entry_flight, entry_cnt;
/* Snapshot the association totals before walking the queue. */
3858 entry_flight = asoc->total_flight;
3859 entry_cnt = asoc->total_flight_count;
3861 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Classify every chunk on the sent queue by its send state. */
3864 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3865 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3866 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3871 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3873 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3875 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted in flight (or in between) is an error. */
3882 if ((inflight > 0) || (inbetween > 0)) {
3884 panic("Flight size-express incorrect? \n");
3886 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3887 entry_flight, entry_cnt);
3889 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3890 inflight, inbetween, resend, above, acked);
/*
 * The peer's receive window re-opened after a window probe.  Clear the
 * probe flag on tp1 and, unless the chunk is already acked (or its
 * data mbuf is gone), take it back out of flight accounting and mark
 * it SCTP_DATAGRAM_RESEND so it goes out on the normal send path.
 * NOTE(review): the storage-class line, braces and some argument tails
 * are elided in this listing; comments describe only the visible code.
 */
3899 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3900 struct sctp_association *asoc,
3901 struct sctp_tmit_chunk *tp1)
3903 tp1->window_probe = 0;
3904 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3905 /* TSN's skipped we do NOT move back. */
3906 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3907 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3909 (uint32_t)(uintptr_t)tp1->whoTo,
3913 /* First setup this by shrinking flight */
3914 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3915 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3918 sctp_flight_size_decrease(tp1);
3919 sctp_total_flight_decrease(stcb, tp1);
3920 /* Now mark for resend */
3921 tp1->sent = SCTP_DATAGRAM_RESEND;
3922 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3925 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3926 tp1->whoTo->flight_size,
3928 (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * Fast path for a SACK carrying only a cumulative ack (no gap
 * reports).  Frees everything newly cum-acked, updates the peer rwnd
 * and per-net flight/cwnd/RTO state, wakes the sending socket, handles
 * window-probe recovery and send-timer management, progresses shutdown
 * when the queues drain, and advances the PR-SCTP peer ack point
 * (sending a FWD-TSN if it moved).
 * NOTE(review): this listing has lines elided (braces, blank lines,
 * some argument tails, declarations such as old_rwnd/send_s/now);
 * comments describe only the visible code.
 */
3934 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3935 uint32_t rwnd, int *abort_now, int ecne_seen)
3937 struct sctp_nets *net;
3938 struct sctp_association *asoc;
3939 struct sctp_tmit_chunk *tp1, *tp2;
3941 int win_probe_recovery = 0;
3942 int win_probe_recovered = 0;
3943 int j, done_once = 0;
3947 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3948 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3949 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3951 SCTP_TCB_LOCK_ASSERT(stcb);
3952 #ifdef SCTP_ASOCLOG_OF_TSNS
3953 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3954 stcb->asoc.cumack_log_at++;
3955 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3956 stcb->asoc.cumack_log_at = 0;
3960 old_rwnd = asoc->peers_rwnd;
3961 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3964 } else if (asoc->last_acked_seq == cumack) {
3965 /* Window update sack */
3966 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3967 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3968 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3969 /* SWS sender side engages */
3970 asoc->peers_rwnd = 0;
3972 if (asoc->peers_rwnd > old_rwnd) {
3978 /* First setup for CC stuff */
3979 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3980 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3981 /* Drag along the window_tsn for cwr's */
3982 net->cwr_window_tsn = cumack;
3984 net->prev_cwnd = net->cwnd;
3989 * CMT: Reset CUC and Fast recovery algo variables before
3992 net->new_pseudo_cumack = 0;
3993 net->will_exit_fast_recovery = 0;
3994 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3995 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3998 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3999 tp1 = TAILQ_LAST(&asoc->sent_queue,
4000 sctpchunk_listhead);
4001 send_s = tp1->rec.data.tsn + 1;
4003 send_s = asoc->sending_seq;
/* Sanity: a cum-ack at or beyond what we have sent is a protocol violation. */
4005 if (SCTP_TSN_GE(cumack, send_s)) {
4006 struct mbuf *op_err;
4007 char msg[SCTP_DIAG_INFO_LEN];
4011 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4013 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4014 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4015 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4018 asoc->this_sack_highest_gap = cumack;
4019 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4020 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4021 stcb->asoc.overall_error_count,
4023 SCTP_FROM_SCTP_INDATA,
4026 stcb->asoc.overall_error_count = 0;
4027 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4028 /* process the new consecutive TSN first */
4029 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4030 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4031 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4032 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4034 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4036 * If it is less than ACKED, it is
4037 * now no-longer in flight. Higher
4038 * values may occur during marking
4040 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4042 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4043 tp1->whoTo->flight_size,
4045 (uint32_t)(uintptr_t)tp1->whoTo,
4048 sctp_flight_size_decrease(tp1);
4049 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4050 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4053 /* sa_ignore NO_NULL_CHK */
4054 sctp_total_flight_decrease(stcb, tp1);
4056 tp1->whoTo->net_ack += tp1->send_size;
4057 if (tp1->snd_count < 2) {
4059 * True non-retransmitted
4062 tp1->whoTo->net_ack2 +=
4065 /* update RTO too? */
4073 sctp_calculate_rto(stcb,
4075 &tp1->sent_rcv_time,
4076 SCTP_RTT_FROM_DATA);
4079 if (tp1->whoTo->rto_needed == 0) {
4080 tp1->whoTo->rto_needed = 1;
4086 * CMT: CUCv2 algorithm. From the
4087 * cumack'd TSNs, for each TSN being
4088 * acked for the first time, set the
4089 * following variables for the
4090 * corresp destination.
4091 * new_pseudo_cumack will trigger a
4093 * find_(rtx_)pseudo_cumack will
4094 * trigger search for the next
4095 * expected (rtx-)pseudo-cumack.
4097 tp1->whoTo->new_pseudo_cumack = 1;
4098 tp1->whoTo->find_pseudo_cumack = 1;
4099 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4101 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4102 /* sa_ignore NO_NULL_CHK */
4103 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4106 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4107 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4109 if (tp1->rec.data.chunk_was_revoked) {
4110 /* deflate the cwnd */
4111 tp1->whoTo->cwnd -= tp1->book_size;
4112 tp1->rec.data.chunk_was_revoked = 0;
4114 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4115 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4116 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4119 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4123 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4124 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4125 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4126 asoc->trigger_reset = 1;
4128 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4130 /* sa_ignore NO_NULL_CHK */
4131 sctp_free_bufspace(stcb, asoc, tp1, 1);
4132 sctp_m_freem(tp1->data);
4135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4136 sctp_log_sack(asoc->last_acked_seq,
4141 SCTP_LOG_FREE_SENT);
4143 asoc->sent_queue_cnt--;
4144 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4151 /* sa_ignore NO_NULL_CHK */
4152 if (stcb->sctp_socket) {
4153 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4157 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4158 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4159 /* sa_ignore NO_NULL_CHK */
4160 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4162 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4163 so = SCTP_INP_SO(stcb->sctp_ep);
4164 atomic_add_int(&stcb->asoc.refcnt, 1);
4165 SCTP_TCB_UNLOCK(stcb);
4166 SCTP_SOCKET_LOCK(so, 1);
4167 SCTP_TCB_LOCK(stcb);
4168 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4169 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4170 /* assoc was freed while we were unlocked */
4171 SCTP_SOCKET_UNLOCK(so, 1);
4175 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4176 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4177 SCTP_SOCKET_UNLOCK(so, 1);
4180 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4181 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4185 /* JRS - Use the congestion control given in the CC module */
4186 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4187 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4188 if (net->net_ack2 > 0) {
4190 * Karn's rule applies to clearing error
4191 * count, this is optional.
4193 net->error_count = 0;
4194 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4195 /* addr came good */
4196 net->dest_state |= SCTP_ADDR_REACHABLE;
4197 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4198 0, (void *)net, SCTP_SO_NOT_LOCKED);
4200 if (net == stcb->asoc.primary_destination) {
4201 if (stcb->asoc.alternate) {
4203 * release the alternate,
4206 sctp_free_remote_addr(stcb->asoc.alternate);
4207 stcb->asoc.alternate = NULL;
4210 if (net->dest_state & SCTP_ADDR_PF) {
4211 net->dest_state &= ~SCTP_ADDR_PF;
4212 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4213 stcb->sctp_ep, stcb, net,
4214 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4215 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4216 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4217 /* Done with this net */
4220 /* restore any doubled timers */
4221 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4222 if (net->RTO < stcb->asoc.minrto) {
4223 net->RTO = stcb->asoc.minrto;
4225 if (net->RTO > stcb->asoc.maxrto) {
4226 net->RTO = stcb->asoc.maxrto;
4230 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4232 asoc->last_acked_seq = cumack;
4234 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4235 /* nothing left in-flight */
4236 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4237 net->flight_size = 0;
4238 net->partial_bytes_acked = 0;
4240 asoc->total_flight = 0;
4241 asoc->total_flight_count = 0;
4245 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4246 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4247 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4248 /* SWS sender side engages */
4249 asoc->peers_rwnd = 0;
4251 if (asoc->peers_rwnd > old_rwnd) {
4252 win_probe_recovery = 1;
4254 /* Now assure a timer where data is queued at */
4257 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4258 if (win_probe_recovery && (net->window_probe)) {
4259 win_probe_recovered = 1;
4261 * Find first chunk that was used with window probe
4262 * and clear the sent
4264 /* sa_ignore FREED_MEMORY */
4265 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4266 if (tp1->window_probe) {
4267 /* move back to data send queue */
4268 sctp_window_probe_recovery(stcb, asoc, tp1);
4273 if (net->flight_size) {
4275 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4276 if (net->window_probe) {
4277 net->window_probe = 0;
4280 if (net->window_probe) {
4282 * In window probes we must assure a timer
4283 * is still running there
4285 net->window_probe = 0;
4286 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4287 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4289 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4290 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4292 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4297 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4298 (asoc->sent_queue_retran_cnt == 0) &&
4299 (win_probe_recovered == 0) &&
4302 * huh, this should not happen unless all packets are
4303 * PR-SCTP and marked to skip of course.
4305 if (sctp_fs_audit(asoc)) {
4306 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4307 net->flight_size = 0;
4309 asoc->total_flight = 0;
4310 asoc->total_flight_count = 0;
4311 asoc->sent_queue_retran_cnt = 0;
4312 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4313 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4314 sctp_flight_size_increase(tp1);
4315 sctp_total_flight_increase(stcb, tp1);
4316 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4317 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4324 /**********************************/
4325 /* Now what about shutdown issues */
4326 /**********************************/
4327 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4328 /* nothing left on sendqueue.. consider done */
4330 if ((asoc->stream_queue_cnt == 1) &&
4331 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4332 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4333 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4334 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4336 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4337 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4338 (asoc->stream_queue_cnt == 1) &&
4339 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4340 struct mbuf *op_err;
4344 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4345 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4346 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4349 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4350 (asoc->stream_queue_cnt == 0)) {
4351 struct sctp_nets *netp;
4353 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4354 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4355 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4357 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4358 sctp_stop_timers_for_shutdown(stcb);
4359 if (asoc->alternate) {
4360 netp = asoc->alternate;
4362 netp = asoc->primary_destination;
4364 sctp_send_shutdown(stcb, netp);
4365 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4366 stcb->sctp_ep, stcb, netp);
4367 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4368 stcb->sctp_ep, stcb, netp);
4369 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4370 (asoc->stream_queue_cnt == 0)) {
4371 struct sctp_nets *netp;
4373 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4374 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4375 sctp_stop_timers_for_shutdown(stcb);
4376 if (asoc->alternate) {
4377 netp = asoc->alternate;
4379 netp = asoc->primary_destination;
4381 sctp_send_shutdown_ack(stcb, netp);
4382 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4383 stcb->sctp_ep, stcb, netp);
4386 /*********************************************/
4387 /* Here we perform PR-SCTP procedures */
4389 /*********************************************/
4390 /* C1. update advancedPeerAckPoint */
4391 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4392 asoc->advanced_peer_ack_point = cumack;
4394 /* PR-Sctp issues need to be addressed too */
4395 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4396 struct sctp_tmit_chunk *lchk;
4397 uint32_t old_adv_peer_ack_point;
4399 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4400 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4401 /* C3. See if we need to send a Fwd-TSN */
4402 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4404 * ISSUE with ECN, see FWD-TSN processing.
4406 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4407 send_forward_tsn(stcb, asoc);
4409 /* try to FR fwd-tsn's that get lost too */
4410 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4411 send_forward_tsn(stcb, asoc);
4416 /* Assure a timer is up */
4417 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4418 stcb->sctp_ep, stcb, lchk->whoTo);
4421 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4422 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4424 stcb->asoc.peers_rwnd,
4425 stcb->asoc.total_flight,
4426 stcb->asoc.total_output_queue_size);
4431 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4432 struct sctp_tcb *stcb,
4433 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4434 int *abort_now, uint8_t flags,
4435 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4437 struct sctp_association *asoc;
4438 struct sctp_tmit_chunk *tp1, *tp2;
4439 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4440 uint16_t wake_him = 0;
4441 uint32_t send_s = 0;
4443 int accum_moved = 0;
4444 int will_exit_fast_recovery = 0;
4445 uint32_t a_rwnd, old_rwnd;
4446 int win_probe_recovery = 0;
4447 int win_probe_recovered = 0;
4448 struct sctp_nets *net = NULL;
4451 uint8_t reneged_all = 0;
4452 uint8_t cmt_dac_flag;
4455 * we take any chance we can to service our queues since we cannot
4456 * get awoken when the socket is read from :<
4459 * Now perform the actual SACK handling: 1) Verify that it is not an
4460 * old sack, if so discard. 2) If there is nothing left in the send
4461 * queue (cum-ack is equal to last acked) then you have a duplicate
4462 * too, update any rwnd change and verify no timers are running.
4463 * then return. 3) Process any new consecutive data i.e. cum-ack
4464 * moved process these first and note that it moved. 4) Process any
4465 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4466 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4467 * sync up flightsizes and things, stop all timers and also check
4468 * for shutdown_pending state. If so then go ahead and send off the
4469 * shutdown. If in shutdown recv, send off the shutdown-ack and
4470 * start that timer, Ret. 9) Strike any non-acked things and do FR
4471 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4472 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4473 * if in shutdown_recv state.
4475 SCTP_TCB_LOCK_ASSERT(stcb);
4477 this_sack_lowest_newack = 0;
4478 SCTP_STAT_INCR(sctps_slowpath_sack);
4480 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4481 #ifdef SCTP_ASOCLOG_OF_TSNS
4482 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4483 stcb->asoc.cumack_log_at++;
4484 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4485 stcb->asoc.cumack_log_at = 0;
4490 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4491 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4492 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4495 old_rwnd = stcb->asoc.peers_rwnd;
4496 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4497 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4498 stcb->asoc.overall_error_count,
4500 SCTP_FROM_SCTP_INDATA,
4503 stcb->asoc.overall_error_count = 0;
4505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4506 sctp_log_sack(asoc->last_acked_seq,
4513 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4515 uint32_t *dupdata, dblock;
4517 for (i = 0; i < num_dup; i++) {
4518 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4519 sizeof(uint32_t), (uint8_t *)&dblock);
4520 if (dupdata == NULL) {
4523 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4527 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4528 tp1 = TAILQ_LAST(&asoc->sent_queue,
4529 sctpchunk_listhead);
4530 send_s = tp1->rec.data.tsn + 1;
4533 send_s = asoc->sending_seq;
4535 if (SCTP_TSN_GE(cum_ack, send_s)) {
4536 struct mbuf *op_err;
4537 char msg[SCTP_DIAG_INFO_LEN];
4540 * no way, we have not even sent this TSN out yet. Peer is
4541 * hopelessly messed up with us.
4543 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4546 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4547 tp1->rec.data.tsn, (void *)tp1);
4552 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4554 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4555 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4556 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4559 /**********************/
4560 /* 1) check the range */
4561 /**********************/
4562 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4563 /* acking something behind */
4567 /* update the Rwnd of the peer */
4568 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4569 TAILQ_EMPTY(&asoc->send_queue) &&
4570 (asoc->stream_queue_cnt == 0)) {
4571 /* nothing left on send/sent and strmq */
4572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4573 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4574 asoc->peers_rwnd, 0, 0, a_rwnd);
4576 asoc->peers_rwnd = a_rwnd;
4577 if (asoc->sent_queue_retran_cnt) {
4578 asoc->sent_queue_retran_cnt = 0;
4580 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4581 /* SWS sender side engages */
4582 asoc->peers_rwnd = 0;
4584 /* stop any timers */
4585 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4586 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4587 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4588 net->partial_bytes_acked = 0;
4589 net->flight_size = 0;
4591 asoc->total_flight = 0;
4592 asoc->total_flight_count = 0;
4596 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4597 * things. The total byte count acked is tracked in netAckSz AND
4598 * netAck2 is used to track the total bytes acked that are un-
4599 * ambiguous and were never retransmitted. We track these on a per
4600 * destination address basis.
4602 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4603 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4604 /* Drag along the window_tsn for cwr's */
4605 net->cwr_window_tsn = cum_ack;
4607 net->prev_cwnd = net->cwnd;
4612 * CMT: Reset CUC and Fast recovery algo variables before
4615 net->new_pseudo_cumack = 0;
4616 net->will_exit_fast_recovery = 0;
4617 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4618 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4622 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4623 * to be greater than the cumack. Also reset saw_newack to 0
4626 net->saw_newack = 0;
4627 net->this_sack_highest_newack = last_tsn;
4629 /* process the new consecutive TSN first */
4630 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4631 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4632 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4634 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4636 * If it is less than ACKED, it is
4637 * now no-longer in flight. Higher
4638 * values may occur during marking
4640 if ((tp1->whoTo->dest_state &
4641 SCTP_ADDR_UNCONFIRMED) &&
4642 (tp1->snd_count < 2)) {
4644 * If there was no retran
4645 * and the address is
4646 * un-confirmed and we sent
4648 * sacked.. its confirmed,
4651 tp1->whoTo->dest_state &=
4652 ~SCTP_ADDR_UNCONFIRMED;
4654 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4655 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4656 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4657 tp1->whoTo->flight_size,
4659 (uint32_t)(uintptr_t)tp1->whoTo,
4662 sctp_flight_size_decrease(tp1);
4663 sctp_total_flight_decrease(stcb, tp1);
4664 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4665 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4669 tp1->whoTo->net_ack += tp1->send_size;
4671 /* CMT SFR and DAC algos */
4672 this_sack_lowest_newack = tp1->rec.data.tsn;
4673 tp1->whoTo->saw_newack = 1;
4675 if (tp1->snd_count < 2) {
4677 * True non-retransmitted
4680 tp1->whoTo->net_ack2 +=
4683 /* update RTO too? */
4687 sctp_calculate_rto(stcb,
4689 &tp1->sent_rcv_time,
4690 SCTP_RTT_FROM_DATA);
4693 if (tp1->whoTo->rto_needed == 0) {
4694 tp1->whoTo->rto_needed = 1;
4700 * CMT: CUCv2 algorithm. From the
4701 * cumack'd TSNs, for each TSN being
4702 * acked for the first time, set the
4703 * following variables for the
4704 * corresp destination.
4705 * new_pseudo_cumack will trigger a
4707 * find_(rtx_)pseudo_cumack will
4708 * trigger search for the next
4709 * expected (rtx-)pseudo-cumack.
4711 tp1->whoTo->new_pseudo_cumack = 1;
4712 tp1->whoTo->find_pseudo_cumack = 1;
4713 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4716 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4717 sctp_log_sack(asoc->last_acked_seq,
4722 SCTP_LOG_TSN_ACKED);
4724 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4725 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4728 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4729 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4730 #ifdef SCTP_AUDITING_ENABLED
4731 sctp_audit_log(0xB3,
4732 (asoc->sent_queue_retran_cnt & 0x000000ff));
4735 if (tp1->rec.data.chunk_was_revoked) {
4736 /* deflate the cwnd */
4737 tp1->whoTo->cwnd -= tp1->book_size;
4738 tp1->rec.data.chunk_was_revoked = 0;
4740 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4741 tp1->sent = SCTP_DATAGRAM_ACKED;
4748 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4749 /* always set this up to cum-ack */
4750 asoc->this_sack_highest_gap = last_tsn;
4752 if ((num_seg > 0) || (num_nr_seg > 0)) {
4755 * thisSackHighestGap will increase while handling NEW
4756 * segments this_sack_highest_newack will increase while
4757 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4758 * used for CMT DAC algo. saw_newack will also change.
4760 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4761 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4762 num_seg, num_nr_seg, &rto_ok)) {
4766 * validate the biggest_tsn_acked in the gap acks if strict
4767 * adherence is wanted.
4769 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4771 * peer is either confused or we are under attack.
4774 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4775 biggest_tsn_acked, send_s);
4779 /*******************************************/
4780 /* cancel ALL T3-send timer if accum moved */
4781 /*******************************************/
4782 if (asoc->sctp_cmt_on_off > 0) {
4783 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4784 if (net->new_pseudo_cumack)
4785 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4787 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4792 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4793 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4794 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4798 /********************************************/
4799 /* drop the acked chunks from the sentqueue */
4800 /********************************************/
4801 asoc->last_acked_seq = cum_ack;
4803 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4804 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4807 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4808 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4809 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4812 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4816 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4817 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4818 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4819 asoc->trigger_reset = 1;
4821 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4822 if (PR_SCTP_ENABLED(tp1->flags)) {
4823 if (asoc->pr_sctp_cnt != 0)
4824 asoc->pr_sctp_cnt--;
4826 asoc->sent_queue_cnt--;
4828 /* sa_ignore NO_NULL_CHK */
4829 sctp_free_bufspace(stcb, asoc, tp1, 1);
4830 sctp_m_freem(tp1->data);
4832 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4833 asoc->sent_queue_cnt_removeable--;
4836 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4837 sctp_log_sack(asoc->last_acked_seq,
4842 SCTP_LOG_FREE_SENT);
4844 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4847 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4849 panic("Warning flight size is positive and should be 0");
4851 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4852 asoc->total_flight);
4854 asoc->total_flight = 0;
4857 /* sa_ignore NO_NULL_CHK */
4858 if ((wake_him) && (stcb->sctp_socket)) {
4859 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4863 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4864 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4865 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4867 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4868 so = SCTP_INP_SO(stcb->sctp_ep);
4869 atomic_add_int(&stcb->asoc.refcnt, 1);
4870 SCTP_TCB_UNLOCK(stcb);
4871 SCTP_SOCKET_LOCK(so, 1);
4872 SCTP_TCB_LOCK(stcb);
4873 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4874 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4875 /* assoc was freed while we were unlocked */
4876 SCTP_SOCKET_UNLOCK(so, 1);
4880 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4881 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4882 SCTP_SOCKET_UNLOCK(so, 1);
4885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4886 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4890 if (asoc->fast_retran_loss_recovery && accum_moved) {
4891 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4892 /* Setup so we will exit RFC2582 fast recovery */
4893 will_exit_fast_recovery = 1;
4897 * Check for revoked fragments:
4899 * if Previous sack - Had no frags then we can't have any revoked if
4900 * Previous sack - Had frag's then - If we now have frags aka
4901 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4902 * some of them. else - The peer revoked all ACKED fragments, since
4903 * we had some before and now we have NONE.
4907 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4908 asoc->saw_sack_with_frags = 1;
4909 } else if (asoc->saw_sack_with_frags) {
4910 int cnt_revoked = 0;
4912 /* Peer revoked all dg's marked or acked */
4913 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4914 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4915 tp1->sent = SCTP_DATAGRAM_SENT;
4916 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4917 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4918 tp1->whoTo->flight_size,
4920 (uint32_t)(uintptr_t)tp1->whoTo,
4923 sctp_flight_size_increase(tp1);
4924 sctp_total_flight_increase(stcb, tp1);
4925 tp1->rec.data.chunk_was_revoked = 1;
4927 * To ensure that this increase in
4928 * flightsize, which is artificial, does not
4929 * throttle the sender, we also increase the
4930 * cwnd artificially.
4932 tp1->whoTo->cwnd += tp1->book_size;
4939 asoc->saw_sack_with_frags = 0;
4942 asoc->saw_sack_with_nr_frags = 1;
4944 asoc->saw_sack_with_nr_frags = 0;
4946 /* JRS - Use the congestion control given in the CC module */
4947 if (ecne_seen == 0) {
4948 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4949 if (net->net_ack2 > 0) {
4951 * Karn's rule applies to clearing error
4952 * count, this is optional.
4954 net->error_count = 0;
4955 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4956 /* addr came good */
4957 net->dest_state |= SCTP_ADDR_REACHABLE;
4958 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4959 0, (void *)net, SCTP_SO_NOT_LOCKED);
4962 if (net == stcb->asoc.primary_destination) {
4963 if (stcb->asoc.alternate) {
4965 * release the alternate,
4968 sctp_free_remote_addr(stcb->asoc.alternate);
4969 stcb->asoc.alternate = NULL;
4973 if (net->dest_state & SCTP_ADDR_PF) {
4974 net->dest_state &= ~SCTP_ADDR_PF;
4975 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4976 stcb->sctp_ep, stcb, net,
4977 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4978 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4979 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4980 /* Done with this net */
4983 /* restore any doubled timers */
4984 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4985 if (net->RTO < stcb->asoc.minrto) {
4986 net->RTO = stcb->asoc.minrto;
4988 if (net->RTO > stcb->asoc.maxrto) {
4989 net->RTO = stcb->asoc.maxrto;
4993 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4996 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4997 /* nothing left in-flight */
4998 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4999 /* stop all timers */
5000 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5002 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5003 net->flight_size = 0;
5004 net->partial_bytes_acked = 0;
5006 asoc->total_flight = 0;
5007 asoc->total_flight_count = 0;
5010 /**********************************/
5011 /* Now what about shutdown issues */
5012 /**********************************/
5013 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5014 /* nothing left on sendqueue.. consider done */
5015 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5016 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5017 asoc->peers_rwnd, 0, 0, a_rwnd);
5019 asoc->peers_rwnd = a_rwnd;
5020 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5021 /* SWS sender side engages */
5022 asoc->peers_rwnd = 0;
5025 if ((asoc->stream_queue_cnt == 1) &&
5026 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5027 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5028 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5029 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5031 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5032 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5033 (asoc->stream_queue_cnt == 1) &&
5034 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5035 struct mbuf *op_err;
5039 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5040 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5041 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5044 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5045 (asoc->stream_queue_cnt == 0)) {
5046 struct sctp_nets *netp;
5048 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5049 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5050 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5052 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5053 sctp_stop_timers_for_shutdown(stcb);
5054 if (asoc->alternate) {
5055 netp = asoc->alternate;
5057 netp = asoc->primary_destination;
5059 sctp_send_shutdown(stcb, netp);
5060 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5061 stcb->sctp_ep, stcb, netp);
5062 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5063 stcb->sctp_ep, stcb, netp);
5065 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5066 (asoc->stream_queue_cnt == 0)) {
5067 struct sctp_nets *netp;
5069 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5070 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5071 sctp_stop_timers_for_shutdown(stcb);
5072 if (asoc->alternate) {
5073 netp = asoc->alternate;
5075 netp = asoc->primary_destination;
5077 sctp_send_shutdown_ack(stcb, netp);
5078 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5079 stcb->sctp_ep, stcb, netp);
5084 * Now here we are going to recycle net_ack for a different use...
5087 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5092 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5093 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5094 * automatically ensure that.
5096 if ((asoc->sctp_cmt_on_off > 0) &&
5097 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5098 (cmt_dac_flag == 0)) {
5099 this_sack_lowest_newack = cum_ack;
5101 if ((num_seg > 0) || (num_nr_seg > 0)) {
5102 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5103 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5105 /* JRS - Use the congestion control given in the CC module */
5106 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5108 /* Now are we exiting loss recovery ? */
5109 if (will_exit_fast_recovery) {
5110 /* Ok, we must exit fast recovery */
5111 asoc->fast_retran_loss_recovery = 0;
5113 if ((asoc->sat_t3_loss_recovery) &&
5114 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5115 /* end satellite t3 loss recovery */
5116 asoc->sat_t3_loss_recovery = 0;
5121 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5122 if (net->will_exit_fast_recovery) {
5123 /* Ok, we must exit fast recovery */
5124 net->fast_retran_loss_recovery = 0;
5128 /* Adjust and set the new rwnd value */
5129 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5130 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5131 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5133 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5134 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5135 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5136 /* SWS sender side engages */
5137 asoc->peers_rwnd = 0;
5139 if (asoc->peers_rwnd > old_rwnd) {
5140 win_probe_recovery = 1;
5144 * Now we must setup so we have a timer up for anyone with
5150 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5151 if (win_probe_recovery && (net->window_probe)) {
5152 win_probe_recovered = 1;
5154 * Find first chunk that was used with
5155 * window probe and clear the event. Put
5156 * it back into the send queue as if has
5159 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5160 if (tp1->window_probe) {
5161 sctp_window_probe_recovery(stcb, asoc, tp1);
5166 if (net->flight_size) {
5168 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5169 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5170 stcb->sctp_ep, stcb, net);
5172 if (net->window_probe) {
5173 net->window_probe = 0;
5176 if (net->window_probe) {
5178 * In window probes we must assure a timer
5179 * is still running there
5181 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5182 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5183 stcb->sctp_ep, stcb, net);
5186 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5187 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5189 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5194 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5195 (asoc->sent_queue_retran_cnt == 0) &&
5196 (win_probe_recovered == 0) &&
5199 * huh, this should not happen unless all packets are
5200 * PR-SCTP and marked to skip of course.
5202 if (sctp_fs_audit(asoc)) {
5203 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5204 net->flight_size = 0;
5206 asoc->total_flight = 0;
5207 asoc->total_flight_count = 0;
5208 asoc->sent_queue_retran_cnt = 0;
5209 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5210 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5211 sctp_flight_size_increase(tp1);
5212 sctp_total_flight_increase(stcb, tp1);
5213 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5214 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5221 /*********************************************/
5222 /* Here we perform PR-SCTP procedures */
5224 /*********************************************/
5225 /* C1. update advancedPeerAckPoint */
5226 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5227 asoc->advanced_peer_ack_point = cum_ack;
5229 /* C2. try to further move advancedPeerAckPoint ahead */
5230 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5231 struct sctp_tmit_chunk *lchk;
5232 uint32_t old_adv_peer_ack_point;
5234 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5235 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5236 /* C3. See if we need to send a Fwd-TSN */
5237 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5239 * ISSUE with ECN, see FWD-TSN processing.
5241 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5242 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5243 0xee, cum_ack, asoc->advanced_peer_ack_point,
5244 old_adv_peer_ack_point);
5246 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5247 send_forward_tsn(stcb, asoc);
5249 /* try to FR fwd-tsn's that get lost too */
5250 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5251 send_forward_tsn(stcb, asoc);
5256 /* Assure a timer is up */
5257 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5258 stcb->sctp_ep, stcb, lchk->whoTo);
5261 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5262 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5264 stcb->asoc.peers_rwnd,
5265 stcb->asoc.total_flight,
5266 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative-TSN-ack carried in a SHUTDOWN chunk (cp) by
 * feeding it through the express SACK path.  The advertised receive
 * window must not move as a result of a SHUTDOWN, so a_rwnd is
 * pre-inflated by the bytes currently in flight — presumably the express
 * handler subtracts outstanding flight again, leaving peers_rwnd
 * unchanged (NOTE(review): confirm against sctp_express_handle_sack).
 *
 * NOTE(review): this excerpt omits some original source lines (return
 * type, braces); only the visible statements are annotated.
 */
5271 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5274 uint32_t cum_ack, a_rwnd;
/* Wire format is network byte order; convert the cum-ack to host order. */
5276 cum_ack = ntohl(cp->cumulative_tsn_ack);
5277 /* Arrange so a_rwnd does NOT change */
5278 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5280 /* Now call the express sack handling */
/* Final 0 argument: no ECNE seen for this pseudo-SACK. */
5281 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * PR-SCTP: after a FORWARD-TSN has advanced strmin->last_mid_delivered,
 * walk this inbound stream's ordered queue and hand the ULP everything
 * that has become deliverable.  Two passes over strmin->inqueue:
 *
 *   1) deliver every complete message whose MID is at or below the new
 *      delivery point (messages the peer told us to skip past);
 *   2) then resume normal in-sequence delivery from last_mid_delivered+1.
 *
 * Partially reassembled messages are not delivered here; instead the
 * delivery point is backed up to just before them and
 * sctp_deliver_reasm_check() is invoked to finish/continue reassembly.
 *
 * NOTE(review): this excerpt omits some original source lines (braces,
 * else lines, a few declarations such as the one for `mid` and the
 * `asoc` initialization); only visible statements are annotated.
 */
5285 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5286 struct sctp_stream_in *strmin)
5288 struct sctp_queued_to_read *control, *ncontrol;
5289 struct sctp_association *asoc;
5291 int need_reasm_check = 0;
/* Snapshot the delivery point; restored after pass 1 if reassembly ran. */
5294 mid = strmin->last_mid_delivered;
5296 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with MID <= mid is now deliverable (FWD-TSN skip). */
5299 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5300 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5301 /* this is deliverable now */
/* Only complete (unfragmented) messages go straight to the read queue. */
5302 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unlink from whichever stream queue the entry sits on. */
5303 if (control->on_strm_q) {
5304 if (control->on_strm_q == SCTP_ON_ORDERED) {
5305 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5306 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5307 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
/* Unknown queue tag is an invariant violation (debug kernels panic). */
5310 panic("strmin: %p ctl: %p unknown %d",
5311 strmin, control, control->on_strm_q);
5314 control->on_strm_q = 0;
5316 /* subtract pending on streams */
/* Keep the aggregate byte count from underflowing on accounting bugs. */
5317 if (asoc->size_on_all_streams >= control->length) {
5318 asoc->size_on_all_streams -= control->length;
5321 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5323 asoc->size_on_all_streams = 0;
5326 sctp_ucount_decr(asoc->cnt_on_all_streams);
5327 /* deliver it to at least the delivery-q */
5328 if (stcb->sctp_socket) {
/* TSN is consumed; it may no longer be renege'd/revoked. */
5329 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5330 sctp_add_to_readq(stcb->sctp_ep, stcb,
5332 &stcb->sctp_socket->so_rcv,
5333 1, SCTP_READ_LOCK_HELD,
5334 SCTP_SO_NOT_LOCKED);
5337 /* Its a fragmented message */
5338 if (control->first_frag_seen) {
5340 * Make it so this is next to
5341 * deliver, we restore later
/* Park delivery point just before this partial message. */
5343 strmin->last_mid_delivered = control->mid - 1;
5344 need_reasm_check = 1;
5349 /* no more delivery now. */
/* Interleaved pass-1 reassembly check for the partial message found above. */
5353 if (need_reasm_check) {
5356 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5357 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5358 /* Restore the next to deliver unless we are ahead */
5359 strmin->last_mid_delivered = mid;
5362 /* Left the front Partial one on */
5365 need_reasm_check = 0;
5368 * now we must deliver things in queue the normal way if any are
/* Pass 2: normal in-order delivery resumes at the next expected MID. */
5371 mid = strmin->last_mid_delivered + 1;
5372 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5373 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5374 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5375 /* this is deliverable now */
5376 if (control->on_strm_q) {
5377 if (control->on_strm_q == SCTP_ON_ORDERED) {
5378 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5379 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5380 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5383 panic("strmin: %p ctl: %p unknown %d",
5384 strmin, control, control->on_strm_q);
5387 control->on_strm_q = 0;
5389 /* subtract pending on streams */
5390 if (asoc->size_on_all_streams >= control->length) {
5391 asoc->size_on_all_streams -= control->length;
5394 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5396 asoc->size_on_all_streams = 0;
5399 sctp_ucount_decr(asoc->cnt_on_all_streams);
5400 /* deliver it to at least the delivery-q */
/* Advance the delivery point past the message we are handing up. */
5401 strmin->last_mid_delivered = control->mid;
5402 if (stcb->sctp_socket) {
5403 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5404 sctp_add_to_readq(stcb->sctp_ep, stcb,
5406 &stcb->sctp_socket->so_rcv, 1,
5407 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Look for the next expected MID on the following iteration. */
5410 mid = strmin->last_mid_delivered + 1;
5412 /* Its a fragmented message */
5413 if (control->first_frag_seen) {
5415 * Make it so this is next to
5418 strmin->last_mid_delivered = control->mid - 1;
5419 need_reasm_check = 1;
/* Final reassembly check if pass 2 stopped on a partial message. */
5427 if (need_reasm_check) {
5428 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * PR-SCTP: purge the reassembly state for one message (stream/MID) that a
 * FORWARD-TSN has rendered obsolete.  Frees the queued fragment chunks,
 * fixes the reassembly-queue byte/count accounting, unlinks the control
 * entry from its stream queue, and releases the control block unless it
 * is already on the socket read queue.
 *
 * For old-style (non-I-DATA) unordered data, only fragments with
 * TSN <= cumtsn are purged; newer fragments survive and the control is
 * re-seeded from the surviving FIRST_FRAG if one is present.
 *
 * NOTE(review): this excerpt omits some original source lines (braces,
 * else/return lines — e.g. the early return after the NULL check at
 * 5454); only visible statements are annotated.
 */
5435 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5436 struct sctp_association *asoc,
5437 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5439 struct sctp_queued_to_read *control;
5440 struct sctp_stream_in *strm;
5441 struct sctp_tmit_chunk *chk, *nchk;
5442 int cnt_removed = 0;
5445 * For now large messages held on the stream reasm that are complete
5446 * will be tossed too. We could in theory do more work to spin
5447 * through and stop after dumping one msg aka seeing the start of a
5448 * new msg at the head, and call the delivery function... to see if
5449 * it can be delivered... But for now we just dump everything on the
5452 strm = &asoc->strmin[stream];
/* Locate the in-progress reassembly entry for this stream/MID, if any. */
5453 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5454 if (control == NULL) {
/*
 * Old-style unordered data already reassembled past cumtsn is still
 * live — nothing to flush (presumably an early return follows here;
 * the return line is among the omitted lines — TODO confirm).
 */
5458 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
/* Walk the fragment list, releasing everything covered by the FWD-TSN. */
5461 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5462 /* Purge hanging chunks */
5463 if (!asoc->idata_supported && (ordered == 0)) {
/* Old-style unordered: keep fragments newer than the cum-TSN. */
5464 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5469 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* Guard the reasm byte counter against underflow. */
5470 if (asoc->size_on_reasm_queue >= chk->send_size) {
5471 asoc->size_on_reasm_queue -= chk->send_size;
5474 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5476 asoc->size_on_reasm_queue = 0;
5479 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
/* Free the mbuf data, then the chunk descriptor itself. */
5481 sctp_m_freem(chk->data);
5484 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Fragments survived the purge: restart reassembly on what is left. */
5486 if (!TAILQ_EMPTY(&control->reasm)) {
5487 /* This has to be old data, unordered */
5488 if (control->data) {
5489 sctp_m_freem(control->data);
5490 control->data = NULL;
/* Reset the control entry so it can accumulate the surviving fragments. */
5492 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5493 chk = TAILQ_FIRST(&control->reasm);
5494 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5495 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5496 sctp_add_chk_to_control(control, strm, stcb, asoc,
5497 chk, SCTP_READ_LOCK_HELD);
/* See whether the rebuilt message is now deliverable. */
5499 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Nothing survived: detach the control entry from its stream queue. */
5502 if (control->on_strm_q == SCTP_ON_ORDERED) {
5503 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5504 if (asoc->size_on_all_streams >= control->length) {
5505 asoc->size_on_all_streams -= control->length;
5508 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5510 asoc->size_on_all_streams = 0;
5513 sctp_ucount_decr(asoc->cnt_on_all_streams);
5514 control->on_strm_q = 0;
5515 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5516 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5517 control->on_strm_q = 0;
5519 } else if (control->on_strm_q) {
/* Unknown queue tag is an invariant violation (debug kernels panic). */
5520 panic("strm: %p ctl: %p unknown %d",
5521 strm, control, control->on_strm_q);
5524 control->on_strm_q = 0;
/* Only free the control if the socket read queue does not still own it. */
5525 if (control->on_read_q == 0) {
5526 sctp_free_remote_addr(control->whoFrom);
5527 if (control->data) {
5528 sctp_m_freem(control->data);
5529 control->data = NULL;
5531 sctp_free_a_readq(stcb, control);
5536 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5537 struct sctp_forward_tsn_chunk *fwd,
5538 int *abort_flag, struct mbuf *m, int offset)
5540 /* The pr-sctp fwd tsn */
5542 * here we will perform all the data receiver side steps for
5543 * processing FwdTSN, as required in by pr-sctp draft:
5545 * Assume we get FwdTSN(x):
5547 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5548 * + others we have 3) examine and update re-ordering queue on
5549 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5550 * report where we are.
5552 struct sctp_association *asoc;
5553 uint32_t new_cum_tsn, gap;
5554 unsigned int i, fwd_sz, m_size;
5556 struct sctp_stream_in *strm;
5557 struct sctp_queued_to_read *control, *sv;
5560 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5561 SCTPDBG(SCTP_DEBUG_INDATA1,
5562 "Bad size too small/big fwd-tsn\n");
5565 m_size = (stcb->asoc.mapping_array_size << 3);
5566 /*************************************************************/
5567 /* 1. Here we update local cumTSN and shift the bitmap array */
5568 /*************************************************************/
5569 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5571 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5572 /* Already got there ... */
5576 * now we know the new TSN is more advanced, let's find the actual
5579 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5580 asoc->cumulative_tsn = new_cum_tsn;
5581 if (gap >= m_size) {
5582 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5583 struct mbuf *op_err;
5584 char msg[SCTP_DIAG_INFO_LEN];
5587 * out of range (of single byte chunks in the rwnd I
5588 * give out). This must be an attacker.
5591 snprintf(msg, sizeof(msg),
5592 "New cum ack %8.8x too high, highest TSN %8.8x",
5593 new_cum_tsn, asoc->highest_tsn_inside_map);
5594 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5595 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5596 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5599 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5601 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5602 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5603 asoc->highest_tsn_inside_map = new_cum_tsn;
5605 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5606 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5608 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5609 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5612 SCTP_TCB_LOCK_ASSERT(stcb);
5613 for (i = 0; i <= gap; i++) {
5614 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5615 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5616 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5617 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5618 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5623 /*************************************************************/
5624 /* 2. Clear up re-assembly queue */
5625 /*************************************************************/
5627 /* This is now done as part of clearing up the stream/seq */
5628 if (asoc->idata_supported == 0) {
5631 /* Flush all the un-ordered data based on cum-tsn */
5632 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5633 for (sid = 0; sid < asoc->streamincnt; sid++) {
5634 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5636 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5638 /*******************************************************/
5639 /* 3. Update the PR-stream re-ordering queues and fix */
5640 /* delivery issues as needed. */
5641 /*******************************************************/
5642 fwd_sz -= sizeof(*fwd);
5645 unsigned int num_str;
5646 uint32_t mid, cur_mid;
5648 uint16_t ordered, flags;
5649 struct sctp_strseq *stseq, strseqbuf;
5650 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5652 offset += sizeof(*fwd);
5654 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5655 if (asoc->idata_supported) {
5656 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5658 num_str = fwd_sz / sizeof(struct sctp_strseq);
5660 for (i = 0; i < num_str; i++) {
5661 if (asoc->idata_supported) {
5662 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5663 sizeof(struct sctp_strseq_mid),
5664 (uint8_t *)&strseqbuf_m);
5665 offset += sizeof(struct sctp_strseq_mid);
5666 if (stseq_m == NULL) {
5669 sid = ntohs(stseq_m->sid);
5670 mid = ntohl(stseq_m->mid);
5671 flags = ntohs(stseq_m->flags);
5672 if (flags & PR_SCTP_UNORDERED_FLAG) {
5678 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5679 sizeof(struct sctp_strseq),
5680 (uint8_t *)&strseqbuf);
5681 offset += sizeof(struct sctp_strseq);
5682 if (stseq == NULL) {
5685 sid = ntohs(stseq->sid);
5686 mid = (uint32_t)ntohs(stseq->ssn);
5694 * Ok we now look for the stream/seq on the read
5695 * queue where its not all delivered. If we find it
5696 * we transmute the read entry into a PDI_ABORTED.
5698 if (sid >= asoc->streamincnt) {
5699 /* screwed up streams, stop! */
5702 if ((asoc->str_of_pdapi == sid) &&
5703 (asoc->ssn_of_pdapi == mid)) {
5705 * If this is the one we were partially
5706 * delivering now then we no longer are.
5707 * Note this will change with the reassembly
5710 asoc->fragmented_delivery_inprogress = 0;
5712 strm = &asoc->strmin[sid];
5713 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5714 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5716 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5717 if ((control->sinfo_stream == sid) &&
5718 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5719 str_seq = (sid << 16) | (0x0000ffff & mid);
5720 control->pdapi_aborted = 1;
5721 sv = stcb->asoc.control_pdapi;
5722 control->end_added = 1;
5723 if (control->on_strm_q == SCTP_ON_ORDERED) {
5724 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5725 if (asoc->size_on_all_streams >= control->length) {
5726 asoc->size_on_all_streams -= control->length;
5729 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5731 asoc->size_on_all_streams = 0;
5734 sctp_ucount_decr(asoc->cnt_on_all_streams);
5735 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5736 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5738 } else if (control->on_strm_q) {
5739 panic("strm: %p ctl: %p unknown %d",
5740 strm, control, control->on_strm_q);
5743 control->on_strm_q = 0;
5744 stcb->asoc.control_pdapi = control;
5745 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5747 SCTP_PARTIAL_DELIVERY_ABORTED,
5749 SCTP_SO_NOT_LOCKED);
5750 stcb->asoc.control_pdapi = sv;
5752 } else if ((control->sinfo_stream == sid) &&
5753 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5754 /* We are past our victim SSN */
5758 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5759 /* Update the sequence number */
5760 strm->last_mid_delivered = mid;
5762 /* now kick the stream the new way */
5763 /* sa_ignore NO_NULL_CHK */
5764 sctp_kick_prsctp_reorder_queue(stcb, strm);
5766 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5769 * Now slide thing forward.
5771 sctp_slide_mapping_arrays(stcb);