2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a sack into the chunk queue (if I have data to
58 * send, that is) and will be sending it ... for bundling.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int lock_held);
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
78 /* Calculate what the rwnd would be */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone as put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
90 if (stcb->sctp_socket == NULL) {
94 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->cnt_on_reasm_queue == 0 &&
100 asoc->cnt_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
108 * take out what has NOT been put on socket queue and we yet hold
111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 asoc->cnt_on_reasm_queue * MSIZE));
113 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 asoc->cnt_on_all_streams * MSIZE));
120 /* what is the overhead of all these rwnd's */
121 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
123 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 * even it is 0. SWS engaged
126 if (calc < stcb->asoc.my_rwnd_control_len) {
135 * Build out our readq entry based on the incoming packet.
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139 struct sctp_nets *net,
140 uint32_t tsn, uint32_t ppid,
141 uint32_t context, uint16_t sid,
142 uint32_t mid, uint8_t flags,
145 struct sctp_queued_to_read *read_queue_e = NULL;
147 sctp_alloc_a_readq(stcb, read_queue_e);
148 if (read_queue_e == NULL) {
151 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 read_queue_e->sinfo_stream = sid;
153 read_queue_e->sinfo_flags = (flags << 8);
154 read_queue_e->sinfo_ppid = ppid;
155 read_queue_e->sinfo_context = context;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->mid = mid;
160 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 TAILQ_INIT(&read_queue_e->reasm);
162 read_queue_e->whoFrom = net;
163 atomic_add_int(&net->ref_count, 1);
164 read_queue_e->data = dm;
165 read_queue_e->stcb = stcb;
166 read_queue_e->port_from = stcb->rport;
168 return (read_queue_e);
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
174 struct sctp_extrcvinfo *seinfo;
175 struct sctp_sndrcvinfo *outinfo;
176 struct sctp_rcvinfo *rcvinfo;
177 struct sctp_nxtinfo *nxtinfo;
184 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 /* user does not want any ancillary data */
192 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
195 seinfo = (struct sctp_extrcvinfo *)sinfo;
196 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
199 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
206 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
209 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
215 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
220 SCTP_BUF_LEN(ret) = 0;
222 /* We need a CMSG header followed by the struct */
223 cmh = mtod(ret, struct cmsghdr *);
225 * Make sure that there is no un-initialized padding between the
226 * cmsg header and cmsg data and after the cmsg data.
229 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 cmh->cmsg_level = IPPROTO_SCTP;
231 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 cmh->cmsg_type = SCTP_RCVINFO;
233 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 rcvinfo->rcv_context = sinfo->sinfo_context;
241 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
246 cmh->cmsg_level = IPPROTO_SCTP;
247 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 cmh->cmsg_type = SCTP_NXTINFO;
249 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 nxtinfo->nxt_flags = 0;
252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 nxtinfo->nxt_flags |= SCTP_UNORDERED;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 nxtinfo->nxt_flags |= SCTP_COMPLETE;
261 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
267 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 cmh->cmsg_level = IPPROTO_SCTP;
269 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 cmh->cmsg_type = SCTP_EXTRCV;
273 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 cmh->cmsg_type = SCTP_SNDRCV;
279 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
289 uint32_t gap, i, cumackp1;
291 int in_r = 0, in_nr = 0;
293 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
296 cumackp1 = asoc->cumulative_tsn + 1;
297 if (SCTP_TSN_GT(cumackp1, tsn)) {
299 * this tsn is behind the cum ack and thus we don't need to
300 * worry about it being moved from one to the other.
304 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
305 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
306 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 if ((in_r == 0) && (in_nr == 0)) {
309 panic("Things are really messed up now");
311 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
312 sctp_print_mapping_array(asoc);
316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
318 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
319 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
320 asoc->highest_tsn_inside_nr_map = tsn;
322 if (tsn == asoc->highest_tsn_inside_map) {
323 /* We must back down to see what the new highest is */
324 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
325 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 asoc->highest_tsn_inside_map = i;
333 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
339 sctp_place_control_in_stream(struct sctp_stream_in *strm,
340 struct sctp_association *asoc,
341 struct sctp_queued_to_read *control)
343 struct sctp_queued_to_read *at;
344 struct sctp_readhead *q;
345 uint8_t flags, unordered;
347 flags = (control->sinfo_flags >> 8);
348 unordered = flags & SCTP_DATA_UNORDERED;
350 q = &strm->uno_inqueue;
351 if (asoc->idata_supported == 0) {
352 if (!TAILQ_EMPTY(q)) {
354 * Only one stream can be here in old style
359 TAILQ_INSERT_TAIL(q, control, next_instrm);
360 control->on_strm_q = SCTP_ON_UNORDERED;
366 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
367 control->end_added = 1;
368 control->first_frag_seen = 1;
369 control->last_frag_seen = 1;
371 if (TAILQ_EMPTY(q)) {
373 TAILQ_INSERT_HEAD(q, control, next_instrm);
375 control->on_strm_q = SCTP_ON_UNORDERED;
377 control->on_strm_q = SCTP_ON_ORDERED;
381 TAILQ_FOREACH(at, q, next_instrm) {
382 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
384 * one in queue is bigger than the new one,
385 * insert before this one
387 TAILQ_INSERT_BEFORE(at, control, next_instrm);
389 control->on_strm_q = SCTP_ON_UNORDERED;
391 control->on_strm_q = SCTP_ON_ORDERED;
394 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
396 * Gak, He sent me a duplicate msg id
397 * number?? return -1 to abort.
401 if (TAILQ_NEXT(at, next_instrm) == NULL) {
403 * We are at the end, insert it
406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
407 sctp_log_strm_del(control, at,
408 SCTP_STR_LOG_FROM_INSERT_TL);
410 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
412 control->on_strm_q = SCTP_ON_UNORDERED;
414 control->on_strm_q = SCTP_ON_ORDERED;
425 sctp_abort_in_reasm(struct sctp_tcb *stcb,
426 struct sctp_queued_to_read *control,
427 struct sctp_tmit_chunk *chk,
428 int *abort_flag, int opspot)
430 char msg[SCTP_DIAG_INFO_LEN];
433 if (stcb->asoc.idata_supported) {
434 snprintf(msg, sizeof(msg),
435 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
437 control->fsn_included,
440 chk->rec.data.fsn, chk->rec.data.mid);
442 snprintf(msg, sizeof(msg),
443 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
445 control->fsn_included,
449 (uint16_t)chk->rec.data.mid);
451 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
452 sctp_m_freem(chk->data);
454 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
455 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
456 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
464 * The control could not be placed and must be cleaned.
466 struct sctp_tmit_chunk *chk, *nchk;
468 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
469 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
471 sctp_m_freem(chk->data);
473 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
475 sctp_free_a_readq(stcb, control);
479 * Queue the chunk either right into the socket buffer if it is the next one
480 * to go OR put it in the correct place in the delivery queue. If we do
481 * append to the so_buf, keep doing so until we are out of order as
482 * long as the control's entered are non-fragmented.
485 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
486 struct sctp_association *asoc,
487 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
490 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
491 * all the data in one stream this could happen quite rapidly. One
492 * could use the TSN to keep track of things, but this scheme breaks
493 * down in the other type of stream usage that could occur. Send a
494 * single msg to stream 0, send 4Billion messages to stream 1, now
495 * send a message to stream 0. You have a situation where the TSN
496 * has wrapped but not in the stream. Is this worth worrying about
497 * or should we just change our queue sort at the bottom to be by
500 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
501 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
502 * assignment this could happen... and I don't see how this would be
503 * a violation. So for now I am undecided an will leave the sort by
504 * SSN alone. Maybe a hybred approach is the answer
507 struct sctp_queued_to_read *at;
511 struct sctp_stream_in *strm;
512 char msg[SCTP_DIAG_INFO_LEN];
514 strm = &asoc->strmin[control->sinfo_stream];
515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
516 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
518 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
519 /* The incoming sseq is behind where we last delivered? */
520 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
521 strm->last_mid_delivered, control->mid);
523 * throw it in the stream so it gets cleaned up in
524 * association destruction
526 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
527 if (asoc->idata_supported) {
528 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
529 strm->last_mid_delivered, control->sinfo_tsn,
530 control->sinfo_stream, control->mid);
532 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
533 (uint16_t)strm->last_mid_delivered,
535 control->sinfo_stream,
536 (uint16_t)control->mid);
538 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
539 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
540 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
546 asoc->size_on_all_streams += control->length;
547 sctp_ucount_incr(asoc->cnt_on_all_streams);
548 nxt_todel = strm->last_mid_delivered + 1;
549 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
550 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
553 so = SCTP_INP_SO(stcb->sctp_ep);
554 atomic_add_int(&stcb->asoc.refcnt, 1);
555 SCTP_TCB_UNLOCK(stcb);
556 SCTP_SOCKET_LOCK(so, 1);
558 atomic_subtract_int(&stcb->asoc.refcnt, 1);
559 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
560 SCTP_SOCKET_UNLOCK(so, 1);
564 /* can be delivered right away? */
565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
566 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
568 /* EY it wont be queued if it could be delivered directly */
570 if (asoc->size_on_all_streams >= control->length) {
571 asoc->size_on_all_streams -= control->length;
574 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
576 asoc->size_on_all_streams = 0;
579 sctp_ucount_decr(asoc->cnt_on_all_streams);
580 strm->last_mid_delivered++;
581 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
582 sctp_add_to_readq(stcb->sctp_ep, stcb,
584 &stcb->sctp_socket->so_rcv, 1,
585 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
586 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
588 nxt_todel = strm->last_mid_delivered + 1;
589 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
590 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
591 if (control->on_strm_q == SCTP_ON_ORDERED) {
592 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
593 if (asoc->size_on_all_streams >= control->length) {
594 asoc->size_on_all_streams -= control->length;
597 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
599 asoc->size_on_all_streams = 0;
602 sctp_ucount_decr(asoc->cnt_on_all_streams);
605 panic("Huh control: %p is on_strm_q: %d",
606 control, control->on_strm_q);
609 control->on_strm_q = 0;
610 strm->last_mid_delivered++;
612 * We ignore the return of deliver_data here
613 * since we always can hold the chunk on the
614 * d-queue. And we have a finite number that
615 * can be delivered from the strq.
617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
618 sctp_log_strm_del(control, NULL,
619 SCTP_STR_LOG_FROM_IMMED_DEL);
621 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
622 sctp_add_to_readq(stcb->sctp_ep, stcb,
624 &stcb->sctp_socket->so_rcv, 1,
625 SCTP_READ_LOCK_NOT_HELD,
628 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
633 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
634 SCTP_SOCKET_UNLOCK(so, 1);
639 * Ok, we did not deliver this guy, find the correct place
640 * to put it on the queue.
642 if (sctp_place_control_in_stream(strm, asoc, control)) {
643 snprintf(msg, sizeof(msg),
644 "Queue to str MID: %u duplicate",
646 sctp_clean_up_control(stcb, control);
647 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
648 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
649 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
657 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
659 struct mbuf *m, *prev = NULL;
660 struct sctp_tcb *stcb;
662 stcb = control->stcb;
663 control->held_length = 0;
667 if (SCTP_BUF_LEN(m) == 0) {
668 /* Skip mbufs with NO length */
671 control->data = sctp_m_free(m);
674 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
675 m = SCTP_BUF_NEXT(prev);
678 control->tail_mbuf = prev;
683 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
684 if (control->on_read_q) {
686 * On read queue so we must increment the SB stuff,
687 * we assume caller has done any locks of SB.
689 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
691 m = SCTP_BUF_NEXT(m);
694 control->tail_mbuf = prev;
699 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
701 struct mbuf *prev = NULL;
702 struct sctp_tcb *stcb;
704 stcb = control->stcb;
707 panic("Control broken");
712 if (control->tail_mbuf == NULL) {
715 sctp_setup_tail_pointer(control);
718 control->tail_mbuf->m_next = m;
720 if (SCTP_BUF_LEN(m) == 0) {
721 /* Skip mbufs with NO length */
724 control->tail_mbuf->m_next = sctp_m_free(m);
725 m = control->tail_mbuf->m_next;
727 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
728 m = SCTP_BUF_NEXT(prev);
731 control->tail_mbuf = prev;
736 if (control->on_read_q) {
738 * On read queue so we must increment the SB stuff,
739 * we assume caller has done any locks of SB.
741 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
743 *added += SCTP_BUF_LEN(m);
744 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
745 m = SCTP_BUF_NEXT(m);
748 control->tail_mbuf = prev;
753 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
755 memset(nc, 0, sizeof(struct sctp_queued_to_read));
756 nc->sinfo_stream = control->sinfo_stream;
757 nc->mid = control->mid;
758 TAILQ_INIT(&nc->reasm);
759 nc->top_fsn = control->top_fsn;
760 nc->mid = control->mid;
761 nc->sinfo_flags = control->sinfo_flags;
762 nc->sinfo_ppid = control->sinfo_ppid;
763 nc->sinfo_context = control->sinfo_context;
764 nc->fsn_included = 0xffffffff;
765 nc->sinfo_tsn = control->sinfo_tsn;
766 nc->sinfo_cumtsn = control->sinfo_cumtsn;
767 nc->sinfo_assoc_id = control->sinfo_assoc_id;
768 nc->whoFrom = control->whoFrom;
769 atomic_add_int(&nc->whoFrom->ref_count, 1);
770 nc->stcb = control->stcb;
771 nc->port_from = control->port_from;
775 sctp_reset_a_control(struct sctp_queued_to_read *control,
776 struct sctp_inpcb *inp, uint32_t tsn)
778 control->fsn_included = tsn;
779 if (control->on_read_q) {
781 * We have to purge it from there, hopefully this will work
784 TAILQ_REMOVE(&inp->read_queue, control, next);
785 control->on_read_q = 0;
790 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
791 struct sctp_association *asoc,
792 struct sctp_stream_in *strm,
793 struct sctp_queued_to_read *control,
795 int inp_read_lock_held)
798 * Special handling for the old un-ordered data chunk. All the
799 * chunks/TSN's go to mid 0. So we have to do the old style watching
800 * to see if we have it all. If you return one, no other control
801 * entries on the un-ordered queue will be looked at. In theory
802 * there should be no others entries in reality, unless the guy is
803 * sending both unordered NDATA and unordered DATA...
805 struct sctp_tmit_chunk *chk, *lchk, *tchk;
807 struct sctp_queued_to_read *nc;
810 if (control->first_frag_seen == 0) {
811 /* Nothing we can do, we have not seen the first piece yet */
814 /* Collapse any we can */
817 fsn = control->fsn_included + 1;
818 /* Now what can we add? */
819 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
820 if (chk->rec.data.fsn == fsn) {
822 sctp_alloc_a_readq(stcb, nc);
826 memset(nc, 0, sizeof(struct sctp_queued_to_read));
827 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
828 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
832 if (control->end_added) {
834 if (!TAILQ_EMPTY(&control->reasm)) {
836 * Ok we have to move anything left
837 * on the control queue to a new
840 sctp_build_readq_entry_from_ctl(nc, control);
841 tchk = TAILQ_FIRST(&control->reasm);
842 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
843 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
844 if (asoc->size_on_reasm_queue >= tchk->send_size) {
845 asoc->size_on_reasm_queue -= tchk->send_size;
848 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
850 asoc->size_on_reasm_queue = 0;
853 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
854 nc->first_frag_seen = 1;
855 nc->fsn_included = tchk->rec.data.fsn;
856 nc->data = tchk->data;
857 nc->sinfo_ppid = tchk->rec.data.ppid;
858 nc->sinfo_tsn = tchk->rec.data.tsn;
859 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
861 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
862 sctp_setup_tail_pointer(nc);
863 tchk = TAILQ_FIRST(&control->reasm);
865 /* Spin the rest onto the queue */
867 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
868 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
869 tchk = TAILQ_FIRST(&control->reasm);
872 * Now lets add it to the queue
873 * after removing control
875 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
876 nc->on_strm_q = SCTP_ON_UNORDERED;
877 if (control->on_strm_q) {
878 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
879 control->on_strm_q = 0;
882 if (control->pdapi_started) {
883 strm->pd_api_started = 0;
884 control->pdapi_started = 0;
886 if (control->on_strm_q) {
887 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
888 control->on_strm_q = 0;
889 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
891 if (control->on_read_q == 0) {
892 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
893 &stcb->sctp_socket->so_rcv, control->end_added,
894 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
896 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
897 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
899 * Switch to the new guy and
905 if (nc->on_strm_q == 0) {
906 sctp_free_a_readq(stcb, nc);
911 sctp_free_a_readq(stcb, nc);
918 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
919 strm->pd_api_started = 1;
920 control->pdapi_started = 1;
921 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
922 &stcb->sctp_socket->so_rcv, control->end_added,
923 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
924 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
932 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
933 struct sctp_association *asoc,
934 struct sctp_queued_to_read *control,
935 struct sctp_tmit_chunk *chk,
938 struct sctp_tmit_chunk *at;
942 * Here we need to place the chunk into the control structure sorted
943 * in the correct order.
945 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
946 /* Its the very first one. */
947 SCTPDBG(SCTP_DEBUG_XXX,
948 "chunk is a first fsn: %u becomes fsn_included\n",
950 at = TAILQ_FIRST(&control->reasm);
951 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
953 * The first chunk in the reassembly is a smaller
954 * TSN than this one, even though this has a first,
955 * it must be from a subsequent msg.
959 if (control->first_frag_seen) {
961 * In old un-ordered we can reassembly on one
962 * control multiple messages. As long as the next
963 * FIRST is greater then the old first (TSN i.e. FSN
969 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
971 * Easy way the start of a new guy beyond
976 if ((chk->rec.data.fsn == control->fsn_included) ||
977 (control->pdapi_started)) {
979 * Ok this should not happen, if it does we
980 * started the pd-api on the higher TSN
981 * (since the equals part is a TSN failure
984 * We are completly hosed in that case since
985 * I have no way to recover. This really
986 * will only happen if we can get more TSN's
987 * higher before the pd-api-point.
989 sctp_abort_in_reasm(stcb, control, chk,
991 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
996 * Ok we have two firsts and the one we just got is
997 * smaller than the one we previously placed.. yuck!
998 * We must swap them out.
1000 /* swap the mbufs */
1001 tdata = control->data;
1002 control->data = chk->data;
1004 /* Save the lengths */
1005 chk->send_size = control->length;
1006 /* Recompute length of control and tail pointer */
1007 sctp_setup_tail_pointer(control);
1008 /* Fix the FSN included */
1009 tmp = control->fsn_included;
1010 control->fsn_included = chk->rec.data.fsn;
1011 chk->rec.data.fsn = tmp;
1012 /* Fix the TSN included */
1013 tmp = control->sinfo_tsn;
1014 control->sinfo_tsn = chk->rec.data.tsn;
1015 chk->rec.data.tsn = tmp;
1016 /* Fix the PPID included */
1017 tmp = control->sinfo_ppid;
1018 control->sinfo_ppid = chk->rec.data.ppid;
1019 chk->rec.data.ppid = tmp;
1020 /* Fix tail pointer */
1023 control->first_frag_seen = 1;
1024 control->fsn_included = chk->rec.data.fsn;
1025 control->top_fsn = chk->rec.data.fsn;
1026 control->sinfo_tsn = chk->rec.data.tsn;
1027 control->sinfo_ppid = chk->rec.data.ppid;
1028 control->data = chk->data;
1029 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1031 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1032 sctp_setup_tail_pointer(control);
1037 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1038 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1040 * This one in queue is bigger than the new one,
1041 * insert the new one before at.
1043 asoc->size_on_reasm_queue += chk->send_size;
1044 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1046 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1048 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1050 * They sent a duplicate fsn number. This really
1051 * should not happen since the FSN is a TSN and it
1052 * should have been dropped earlier.
1054 sctp_abort_in_reasm(stcb, control, chk,
1056 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1061 if (inserted == 0) {
1062 /* Its at the end */
1063 asoc->size_on_reasm_queue += chk->send_size;
1064 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1065 control->top_fsn = chk->rec.data.fsn;
1066 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1071 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1072 struct sctp_stream_in *strm, int inp_read_lock_held)
1075 * Given a stream, strm, see if any of the SSN's on it that are
1076 * fragmented are ready to deliver. If so go ahead and place them on
1077 * the read queue. In so placing if we have hit the end, then we
1078 * need to remove them from the stream's queue.
1080 struct sctp_queued_to_read *control, *nctl = NULL;
1081 uint32_t next_to_del;
1085 if (stcb->sctp_socket) {
1086 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1087 stcb->sctp_ep->partial_delivery_point);
1089 pd_point = stcb->sctp_ep->partial_delivery_point;
1091 control = TAILQ_FIRST(&strm->uno_inqueue);
1093 if ((control != NULL) &&
1094 (asoc->idata_supported == 0)) {
1095 /* Special handling needed for "old" data format */
1096 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1100 if (strm->pd_api_started) {
1101 /* Can't add more */
1105 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1106 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1107 nctl = TAILQ_NEXT(control, next_instrm);
1108 if (control->end_added) {
1109 /* We just put the last bit on */
1110 if (control->on_strm_q) {
1112 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1113 panic("Huh control: %p on_q: %d -- not unordered?",
1114 control, control->on_strm_q);
1117 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1118 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1119 control->on_strm_q = 0;
1121 if (control->on_read_q == 0) {
1122 sctp_add_to_readq(stcb->sctp_ep, stcb,
1124 &stcb->sctp_socket->so_rcv, control->end_added,
1125 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1128 /* Can we do a PD-API for this un-ordered guy? */
1129 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1130 strm->pd_api_started = 1;
1131 control->pdapi_started = 1;
1132 sctp_add_to_readq(stcb->sctp_ep, stcb,
1134 &stcb->sctp_socket->so_rcv, control->end_added,
1135 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1143 control = TAILQ_FIRST(&strm->inqueue);
1144 if (strm->pd_api_started) {
1145 /* Can't add more */
1148 if (control == NULL) {
1151 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1153 * Ok the guy at the top was being partially delivered
1154 * completed, so we remove it. Note the pd_api flag was
1155 * taken off when the chunk was merged on in
1156 * sctp_queue_data_for_reasm below.
1158 nctl = TAILQ_NEXT(control, next_instrm);
1159 SCTPDBG(SCTP_DEBUG_XXX,
1160 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1161 control, control->end_added, control->mid,
1162 control->top_fsn, control->fsn_included,
1163 strm->last_mid_delivered);
1164 if (control->end_added) {
1165 if (control->on_strm_q) {
1167 if (control->on_strm_q != SCTP_ON_ORDERED) {
1168 panic("Huh control: %p on_q: %d -- not ordered?",
1169 control, control->on_strm_q);
1172 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1173 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1174 if (asoc->size_on_all_streams >= control->length) {
1175 asoc->size_on_all_streams -= control->length;
1178 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1180 asoc->size_on_all_streams = 0;
1183 sctp_ucount_decr(asoc->cnt_on_all_streams);
1184 control->on_strm_q = 0;
1186 if (strm->pd_api_started && control->pdapi_started) {
1187 control->pdapi_started = 0;
1188 strm->pd_api_started = 0;
1190 if (control->on_read_q == 0) {
1191 sctp_add_to_readq(stcb->sctp_ep, stcb,
1193 &stcb->sctp_socket->so_rcv, control->end_added,
1194 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1199 if (strm->pd_api_started) {
1201 * Can't add more must have gotten an un-ordered above being
1202 * partially delivered.
1207 next_to_del = strm->last_mid_delivered + 1;
1209 SCTPDBG(SCTP_DEBUG_XXX,
1210 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1211 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1213 nctl = TAILQ_NEXT(control, next_instrm);
1214 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1215 (control->first_frag_seen)) {
1218 /* Ok we can deliver it onto the stream. */
1219 if (control->end_added) {
1220 /* We are done with it afterwards */
1221 if (control->on_strm_q) {
1223 if (control->on_strm_q != SCTP_ON_ORDERED) {
1224 panic("Huh control: %p on_q: %d -- not ordered?",
1225 control, control->on_strm_q);
1228 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1229 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1230 if (asoc->size_on_all_streams >= control->length) {
1231 asoc->size_on_all_streams -= control->length;
1234 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1236 asoc->size_on_all_streams = 0;
1239 sctp_ucount_decr(asoc->cnt_on_all_streams);
1240 control->on_strm_q = 0;
1244 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1246 * A singleton now slipping through - mark
1247 * it non-revokable too
1249 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1250 } else if (control->end_added == 0) {
1252 * Check if we can defer adding until its
1255 if ((control->length < pd_point) || (strm->pd_api_started)) {
1257 * Don't need it or cannot add more
1258 * (one being delivered that way)
1263 done = (control->end_added) && (control->last_frag_seen);
1264 if (control->on_read_q == 0) {
1266 if (asoc->size_on_all_streams >= control->length) {
1267 asoc->size_on_all_streams -= control->length;
1270 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1272 asoc->size_on_all_streams = 0;
1275 strm->pd_api_started = 1;
1276 control->pdapi_started = 1;
1278 sctp_add_to_readq(stcb->sctp_ep, stcb,
1280 &stcb->sctp_socket->so_rcv, control->end_added,
1281 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1283 strm->last_mid_delivered = next_to_del;
/*
 * NOTE(review): this extract is gap-sampled -- brace/else/return lines of
 * the original file are not visible between some statements below, so the
 * code text is kept byte-identical and only comments are added.
 *
 * Merge the payload of 'chk' onto the partially reassembled message
 * 'control', update reassembly-queue accounting, and free the chunk.
 * 'hold_rlock' says whether the caller already holds the inp read lock;
 * if not, and the control is already on the read queue, it is taken here.
 */
1296 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1297 struct sctp_stream_in *strm,
1298 struct sctp_tcb *stcb, struct sctp_association *asoc,
1299 struct sctp_tmit_chunk *chk, int hold_rlock)
1302 * Given a control and a chunk, merge the data from the chk onto the
1303 * control and free up the chunk resources.
/* Lock only when the message is already visible to a reader (pd-api). */
1308 if (control->on_read_q && (hold_rlock == 0)) {
1310 * Its being pd-api'd so we must do some locks.
1312 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First data for this control: adopt the mbuf chain; otherwise append. */
1315 if (control->data == NULL) {
1316 control->data = chk->data;
1317 sctp_setup_tail_pointer(control);
1319 sctp_add_to_tail_pointer(control, chk->data, &added);
/* This chunk's FSN becomes the highest in-sequence fragment merged in. */
1321 control->fsn_included = chk->rec.data.fsn;
/* The chunk leaves the reassembly queue: fix byte/count accounting and
 * mark its TSN non-revokable. */
1322 asoc->size_on_reasm_queue -= chk->send_size;
1323 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1324 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* A FIRST fragment supplies the message's TSN and PPID. */
1326 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1327 control->first_frag_seen = 1;
1328 control->sinfo_tsn = chk->rec.data.tsn;
1329 control->sinfo_ppid = chk->rec.data.ppid;
/* A LAST fragment completes the message: shut down partial-delivery
 * state and pull the control off whichever stream queue holds it. */
1331 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1333 if ((control->on_strm_q) && (control->on_read_q)) {
1334 if (control->pdapi_started) {
1335 control->pdapi_started = 0;
1336 strm->pd_api_started = 0;
1338 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1340 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1341 control->on_strm_q = 0;
1342 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1344 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1346 * Don't need to decrement
1347 * size_on_all_streams, since control is on
1350 sctp_ucount_decr(asoc->cnt_on_all_streams);
1351 control->on_strm_q = 0;
1353 } else if (control->on_strm_q) {
/* Any other on_strm_q value is corrupted state; presumably guarded by
 * an INVARIANTS-style #ifdef not visible in this extract -- confirm. */
1354 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1355 control->on_strm_q);
1359 control->end_added = 1;
1360 control->last_frag_seen = 1;
/* Drop the read lock only if we took it above (see hold_rlock test). */
1363 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1365 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1370 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1371 * queue, see if anything can be delivered. If so pull it off (or as much as
1372 * we can). If we run out of space then we must dump what we can and set the
1373 * appropriate flag to say we queued what we could.
/*
 * NOTE(review): gap-sampled extract -- braces, gotos, returns and some
 * assignments (e.g. the one setting 'unordered') are not visible here, so
 * the code text is kept byte-identical and only comments are touched.
 */
1376 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1377 struct sctp_queued_to_read *control,
1378 struct sctp_tmit_chunk *chk,
1379 int created_control,
1380 int *abort_flag, uint32_t tsn)
1383 struct sctp_tmit_chunk *at, *nat;
1384 struct sctp_stream_in *strm;
1385 int do_wakeup, unordered;
/* Work on the inbound stream this message belongs to. */
1388 strm = &asoc->strmin[control->sinfo_stream];
1390 * For old un-ordered data chunks.
/* The U bit lives in the upper byte of sinfo_flags. */
1392 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1397 /* Must be added to the stream-in queue */
1398 if (created_control) {
1399 if (unordered == 0) {
1400 sctp_ucount_incr(asoc->cnt_on_all_streams);
/* A non-zero return means an entry with this SSN/MID already exists. */
1402 if (sctp_place_control_in_stream(strm, asoc, control)) {
1403 /* Duplicate SSN? */
1404 sctp_abort_in_reasm(stcb, control, chk,
1406 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1407 sctp_clean_up_control(stcb, control);
/* Old DATA only: a fragment right after the cum-ack must carry the B bit,
 * otherwise the start of the message can never arrive. */
1410 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1412 * Ok we created this control and now lets validate
1413 * that its legal i.e. there is a B bit set, if not
1414 * and we have up to the cum-ack then its invalid.
1416 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1417 sctp_abort_in_reasm(stcb, control, chk,
1419 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old (non I-DATA) unordered fragments take a dedicated path. */
1424 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1425 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1429 * Ok we must queue the chunk into the reassembly portion: o if its
1430 * the first it goes to the control mbuf. o if its not first but the
1431 * next in sequence it goes to the control, and each succeeding one
1432 * in order also goes. o if its not in order we place it on the list
1435 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1436 /* Its the very first one. */
1437 SCTPDBG(SCTP_DEBUG_XXX,
1438 "chunk is a first fsn: %u becomes fsn_included\n",
/* Two FIRST fragments for the same message is a sender bug -- abort. */
1440 if (control->first_frag_seen) {
1442 * Error on senders part, they either sent us two
1443 * data chunks with FIRST, or they sent two
1444 * un-ordered chunks that were fragmented at the
1445 * same time in the same stream.
1447 sctp_abort_in_reasm(stcb, control, chk,
1449 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* Adopt the FIRST fragment's data and identifiers into the control. */
1452 control->first_frag_seen = 1;
1453 control->sinfo_ppid = chk->rec.data.ppid;
1454 control->sinfo_tsn = chk->rec.data.tsn;
1455 control->fsn_included = chk->rec.data.fsn;
1456 control->data = chk->data;
1457 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1459 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1460 sctp_setup_tail_pointer(control);
1461 asoc->size_on_all_streams += control->length;
1463 /* Place the chunk in our list */
/* Until the LAST fragment is seen we still accept higher FSNs. */
1466 if (control->last_frag_seen == 0) {
1467 /* Still willing to raise highest FSN seen */
1468 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1469 SCTPDBG(SCTP_DEBUG_XXX,
1470 "We have a new top_fsn: %u\n",
1472 control->top_fsn = chk->rec.data.fsn;
1474 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1475 SCTPDBG(SCTP_DEBUG_XXX,
1476 "The last fsn is now in place fsn: %u\n",
1478 control->last_frag_seen = 1;
1480 if (asoc->idata_supported || control->first_frag_seen) {
1482 * For IDATA we always check since we know
1483 * that the first fragment is 0. For old
1484 * DATA we have to receive the first before
1485 * we know the first FSN (which is the TSN).
/* FSN at or below what was already merged means a duplicate -- abort. */
1487 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1489 * We have already delivered up to
1492 sctp_abort_in_reasm(stcb, control, chk,
1494 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/* last_frag_seen is already set here: a second LAST is a protocol error. */
1499 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1500 /* Second last? huh? */
1501 SCTPDBG(SCTP_DEBUG_XXX,
1502 "Duplicate last fsn: %u (top: %u) -- abort\n",
1503 chk->rec.data.fsn, control->top_fsn);
1504 sctp_abort_in_reasm(stcb, control,
1506 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1509 if (asoc->idata_supported || control->first_frag_seen) {
1511 * For IDATA we always check since we know
1512 * that the first fragment is 0. For old
1513 * DATA we have to receive the first before
1514 * we know the first FSN (which is the TSN).
1517 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1519 * We have already delivered up to
1522 SCTPDBG(SCTP_DEBUG_XXX,
1523 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1524 chk->rec.data.fsn, control->fsn_included);
1525 sctp_abort_in_reasm(stcb, control, chk,
1527 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1532 * validate not beyond top FSN if we have seen last
1535 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1536 SCTPDBG(SCTP_DEBUG_XXX,
1537 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1540 sctp_abort_in_reasm(stcb, control, chk,
1542 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1547 * If we reach here, we need to place the new chunk in the
1548 * reassembly for this control.
1550 SCTPDBG(SCTP_DEBUG_XXX,
1551 "chunk is a not first fsn: %u needs to be inserted\n",
/* Walk the reassembly list (kept sorted by FSN) to find the slot. */
1553 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1554 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1556 * This one in queue is bigger than the new
1557 * one, insert the new one before at.
1559 SCTPDBG(SCTP_DEBUG_XXX,
1560 "Insert it before fsn: %u\n",
1562 asoc->size_on_reasm_queue += chk->send_size;
1563 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1564 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1567 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1569 * Gak, He sent me a duplicate str seq
1573 * foo bar, I guess I will just free this
1574 * new guy, should we abort too? FIX ME
1575 * MAYBE? Or it COULD be that the SSN's have
1576 * wrapped. Maybe I should compare to TSN
1577 * somehow... sigh for now just blow away
1580 SCTPDBG(SCTP_DEBUG_XXX,
1581 "Duplicate to fsn: %u -- abort\n",
1583 sctp_abort_in_reasm(stcb, control,
1585 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
/* No larger FSN found: the chunk belongs at the tail of the list.
 * ('inserted' is set in lines not visible in this extract.) */
1589 if (inserted == 0) {
1590 /* Goes on the end */
1591 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1593 asoc->size_on_reasm_queue += chk->send_size;
1594 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1595 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1599 * Ok lets see if we can suck any up into the control structure that
1600 * are in seq if it makes sense.
1604 * If the first fragment has not been seen there is no sense in
/* Drain phase: merge every fragment that is now in sequence. */
1607 if (control->first_frag_seen) {
1608 next_fsn = control->fsn_included + 1;
1609 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1610 if (at->rec.data.fsn == next_fsn) {
1611 /* We can add this one now to the control */
1612 SCTPDBG(SCTP_DEBUG_XXX,
1613 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1616 next_fsn, control->fsn_included);
1617 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1618 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1619 if (control->on_read_q) {
1623 * We only add to the
1624 * size-on-all-streams if its not on
1625 * the read q. The read q flag will
1626 * cause a sballoc so its accounted
1629 asoc->size_on_all_streams += lenadded;
/* Message now complete while a partial delivery was in progress:
 * end the pd-api session for this stream. */
1632 if (control->end_added && control->pdapi_started) {
1633 if (strm->pd_api_started) {
1634 strm->pd_api_started = 0;
1635 control->pdapi_started = 0;
1637 if (control->on_read_q == 0) {
1638 sctp_add_to_readq(stcb->sctp_ep, stcb,
1640 &stcb->sctp_socket->so_rcv, control->end_added,
1641 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1651 /* Need to wakeup the reader */
1652 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Look up the reassembly control for message id 'mid' on stream 'strm'.
 * The stream's inqueue is searched by MID; the unordered queue is only
 * searchable by MID when I-DATA is in use -- with old DATA the first
 * unordered entry is taken instead.
 * NOTE(review): gap-sampled extract; the guard around the first loop and
 * the break/return lines of the original are not visible here.
 */
1656 static struct sctp_queued_to_read *
1657 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1659 struct sctp_queued_to_read *control;
1662 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1663 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1668 if (idata_supported) {
1669 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1670 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Old DATA: unordered messages carry no usable MID, take the head. */
1675 control = TAILQ_FIRST(&strm->uno_inqueue);
1682 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1683 struct mbuf **m, int offset, int chk_length,
1684 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1685 int *break_flag, int last_chunk, uint8_t chk_type)
1687 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1688 uint32_t tsn, fsn, gap, mid;
1691 int need_reasm_check = 0;
1693 struct mbuf *op_err;
1694 char msg[SCTP_DIAG_INFO_LEN];
1695 struct sctp_queued_to_read *control, *ncontrol;
1698 struct sctp_stream_reset_list *liste;
1701 int created_control = 0;
1703 if (chk_type == SCTP_IDATA) {
1704 struct sctp_idata_chunk *chunk, chunk_buf;
1706 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1707 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1708 chk_flags = chunk->ch.chunk_flags;
1709 clen = sizeof(struct sctp_idata_chunk);
1710 tsn = ntohl(chunk->dp.tsn);
1711 sid = ntohs(chunk->dp.sid);
1712 mid = ntohl(chunk->dp.mid);
1713 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1715 ppid = chunk->dp.ppid_fsn.ppid;
1717 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1718 ppid = 0xffffffff; /* Use as an invalid value. */
1721 struct sctp_data_chunk *chunk, chunk_buf;
1723 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1724 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1725 chk_flags = chunk->ch.chunk_flags;
1726 clen = sizeof(struct sctp_data_chunk);
1727 tsn = ntohl(chunk->dp.tsn);
1728 sid = ntohs(chunk->dp.sid);
1729 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1731 ppid = chunk->dp.ppid;
1733 if ((size_t)chk_length == clen) {
1735 * Need to send an abort since we had a empty data chunk.
1737 op_err = sctp_generate_no_user_data_cause(tsn);
1738 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1739 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1743 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1744 asoc->send_sack = 1;
1746 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1747 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1748 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1753 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1754 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1755 /* It is a duplicate */
1756 SCTP_STAT_INCR(sctps_recvdupdata);
1757 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1758 /* Record a dup for the next outbound sack */
1759 asoc->dup_tsns[asoc->numduptsns] = tsn;
1762 asoc->send_sack = 1;
1765 /* Calculate the number of TSN's between the base and this TSN */
1766 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1767 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1768 /* Can't hold the bit in the mapping at max array, toss it */
1771 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1772 SCTP_TCB_LOCK_ASSERT(stcb);
1773 if (sctp_expand_mapping_array(asoc, gap)) {
1774 /* Can't expand, drop it */
1778 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1781 /* See if we have received this one already */
1782 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1783 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1784 SCTP_STAT_INCR(sctps_recvdupdata);
1785 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1786 /* Record a dup for the next outbound sack */
1787 asoc->dup_tsns[asoc->numduptsns] = tsn;
1790 asoc->send_sack = 1;
1794 * Check to see about the GONE flag, duplicates would cause a sack
1795 * to be sent up above
1797 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1798 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1799 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1801 * wait a minute, this guy is gone, there is no longer a
1802 * receiver. Send peer an ABORT!
1804 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1805 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1810 * Now before going further we see if there is room. If NOT then we
1811 * MAY let one through only IF this TSN is the one we are waiting
1812 * for on a partial delivery API.
1815 /* Is the stream valid? */
1816 if (sid >= asoc->streamincnt) {
1817 struct sctp_error_invalid_stream *cause;
1819 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1820 0, M_NOWAIT, 1, MT_DATA);
1821 if (op_err != NULL) {
1822 /* add some space up front so prepend will work well */
1823 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1824 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1826 * Error causes are just param's and this one has
1827 * two back to back phdr, one with the error type
1828 * and size, the other with the streamid and a rsvd
1830 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1831 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1832 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1833 cause->stream_id = htons(sid);
1834 cause->reserved = htons(0);
1835 sctp_queue_op_err(stcb, op_err);
1837 SCTP_STAT_INCR(sctps_badsid);
1838 SCTP_TCB_LOCK_ASSERT(stcb);
1839 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1840 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1841 asoc->highest_tsn_inside_nr_map = tsn;
1843 if (tsn == (asoc->cumulative_tsn + 1)) {
1844 /* Update cum-ack */
1845 asoc->cumulative_tsn = tsn;
1850 * If its a fragmented message, lets see if we can find the control
1851 * on the reassembly queues.
1853 if ((chk_type == SCTP_IDATA) &&
1854 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1857 * The first *must* be fsn 0, and other (middle/end) pieces
1858 * can *not* be fsn 0. XXX: This can happen in case of a
1859 * wrap around. Ignore is for now.
1861 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1865 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1866 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1867 chk_flags, control);
1868 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1869 /* See if we can find the re-assembly entity */
1870 if (control != NULL) {
1871 /* We found something, does it belong? */
1872 if (ordered && (mid != control->mid)) {
1873 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1875 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1876 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1877 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1881 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1883 * We can't have a switched order with an
1886 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1890 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1892 * We can't have a switched unordered with a
1895 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1902 * Its a complete segment. Lets validate we don't have a
1903 * re-assembly going on with the same Stream/Seq (for
1904 * ordered) or in the same Stream for unordered.
1906 if (control != NULL) {
1907 if (ordered || asoc->idata_supported) {
1908 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1910 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1913 if ((tsn == control->fsn_included + 1) &&
1914 (control->end_added == 0)) {
1915 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1923 /* now do the tests */
1924 if (((asoc->cnt_on_all_streams +
1925 asoc->cnt_on_reasm_queue +
1926 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1927 (((int)asoc->my_rwnd) <= 0)) {
1929 * When we have NO room in the rwnd we check to make sure
1930 * the reader is doing its job...
1932 if (stcb->sctp_socket->so_rcv.sb_cc) {
1933 /* some to read, wake-up */
1934 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1937 so = SCTP_INP_SO(stcb->sctp_ep);
1938 atomic_add_int(&stcb->asoc.refcnt, 1);
1939 SCTP_TCB_UNLOCK(stcb);
1940 SCTP_SOCKET_LOCK(so, 1);
1941 SCTP_TCB_LOCK(stcb);
1942 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1943 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1944 /* assoc was freed while we were unlocked */
1945 SCTP_SOCKET_UNLOCK(so, 1);
1949 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1950 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1951 SCTP_SOCKET_UNLOCK(so, 1);
1954 /* now is it in the mapping array of what we have accepted? */
1955 if (chk_type == SCTP_DATA) {
1956 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1957 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1958 /* Nope not in the valid range dump it */
1960 sctp_set_rwnd(stcb, asoc);
1961 if ((asoc->cnt_on_all_streams +
1962 asoc->cnt_on_reasm_queue +
1963 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1964 SCTP_STAT_INCR(sctps_datadropchklmt);
1966 SCTP_STAT_INCR(sctps_datadroprwnd);
1972 if (control == NULL) {
1975 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1980 #ifdef SCTP_ASOCLOG_OF_TSNS
1981 SCTP_TCB_LOCK_ASSERT(stcb);
1982 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1983 asoc->tsn_in_at = 0;
1984 asoc->tsn_in_wrapped = 1;
1986 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1987 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1988 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1989 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1990 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1991 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1992 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1993 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1997 * Before we continue lets validate that we are not being fooled by
1998 * an evil attacker. We can only have Nk chunks based on our TSN
1999 * spread allowed by the mapping array N * 8 bits, so there is no
2000 * way our stream sequence numbers could have wrapped. We of course
2001 * only validate the FIRST fragment so the bit must be set.
2003 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2004 (TAILQ_EMPTY(&asoc->resetHead)) &&
2005 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2006 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2007 /* The incoming sseq is behind where we last delivered? */
2008 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2009 mid, asoc->strmin[sid].last_mid_delivered);
2011 if (asoc->idata_supported) {
2012 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2013 asoc->strmin[sid].last_mid_delivered,
2018 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2019 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2024 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2025 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2026 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2030 if (chk_type == SCTP_IDATA) {
2031 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2033 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2035 if (last_chunk == 0) {
2036 if (chk_type == SCTP_IDATA) {
2037 dmbuf = SCTP_M_COPYM(*m,
2038 (offset + sizeof(struct sctp_idata_chunk)),
2041 dmbuf = SCTP_M_COPYM(*m,
2042 (offset + sizeof(struct sctp_data_chunk)),
2045 #ifdef SCTP_MBUF_LOGGING
2046 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2047 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2051 /* We can steal the last chunk */
2055 /* lop off the top part */
2056 if (chk_type == SCTP_IDATA) {
2057 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2059 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2061 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2062 l_len = SCTP_BUF_LEN(dmbuf);
2065 * need to count up the size hopefully does not hit
2071 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2072 l_len += SCTP_BUF_LEN(lat);
2075 if (l_len > the_len) {
2076 /* Trim the end round bytes off too */
2077 m_adj(dmbuf, -(l_len - the_len));
2080 if (dmbuf == NULL) {
2081 SCTP_STAT_INCR(sctps_nomem);
2085 * Now no matter what, we need a control, get one if we don't have
2086 * one (we may have gotten it above when we found the message was
2089 if (control == NULL) {
2090 sctp_alloc_a_readq(stcb, control);
2091 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2096 if (control == NULL) {
2097 SCTP_STAT_INCR(sctps_nomem);
2100 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2103 control->data = dmbuf;
2104 for (mm = control->data; mm; mm = mm->m_next) {
2105 control->length += SCTP_BUF_LEN(mm);
2107 control->tail_mbuf = NULL;
2108 control->end_added = 1;
2109 control->last_frag_seen = 1;
2110 control->first_frag_seen = 1;
2111 control->fsn_included = fsn;
2112 control->top_fsn = fsn;
2114 created_control = 1;
2116 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2117 chk_flags, ordered, mid, control);
2118 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2119 TAILQ_EMPTY(&asoc->resetHead) &&
2121 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2122 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2123 /* Candidate for express delivery */
2125 * Its not fragmented, No PD-API is up, Nothing in the
2126 * delivery queue, Its un-ordered OR ordered and the next to
2127 * deliver AND nothing else is stuck on the stream queue,
2128 * And there is room for it in the socket buffer. Lets just
2129 * stuff it up the buffer....
2131 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2132 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2133 asoc->highest_tsn_inside_nr_map = tsn;
2135 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2138 sctp_add_to_readq(stcb->sctp_ep, stcb,
2139 control, &stcb->sctp_socket->so_rcv,
2140 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2142 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2143 /* for ordered, bump what we delivered */
2144 asoc->strmin[sid].last_mid_delivered++;
2146 SCTP_STAT_INCR(sctps_recvexpress);
2147 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2148 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2149 SCTP_STR_LOG_FROM_EXPRS_DEL);
2152 goto finish_express_del;
2155 /* Now will we need a chunk too? */
2156 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2157 sctp_alloc_a_chunk(stcb, chk);
2159 /* No memory so we drop the chunk */
2160 SCTP_STAT_INCR(sctps_nomem);
2161 if (last_chunk == 0) {
2162 /* we copied it, free the copy */
2163 sctp_m_freem(dmbuf);
2167 chk->rec.data.tsn = tsn;
2168 chk->no_fr_allowed = 0;
2169 chk->rec.data.fsn = fsn;
2170 chk->rec.data.mid = mid;
2171 chk->rec.data.sid = sid;
2172 chk->rec.data.ppid = ppid;
2173 chk->rec.data.context = stcb->asoc.context;
2174 chk->rec.data.doing_fast_retransmit = 0;
2175 chk->rec.data.rcv_flags = chk_flags;
2177 chk->send_size = the_len;
2179 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2182 atomic_add_int(&net->ref_count, 1);
2185 /* Set the appropriate TSN mark */
2186 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2187 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2188 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2189 asoc->highest_tsn_inside_nr_map = tsn;
2192 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2193 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2194 asoc->highest_tsn_inside_map = tsn;
2197 /* Now is it complete (i.e. not fragmented)? */
2198 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2200 * Special check for when streams are resetting. We could be
2201 * more smart about this and check the actual stream to see
2202 * if it is not being reset.. that way we would not create a
2203 * HOLB when amongst streams being reset and those not being
2207 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2208 SCTP_TSN_GT(tsn, liste->tsn)) {
2210 * yep its past where we need to reset... go ahead
2213 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2215 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2217 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2218 unsigned char inserted = 0;
2220 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2221 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2226 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2231 if (inserted == 0) {
2233 * must be put at end, use prevP
2234 * (all setup from loop) to setup
2237 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2240 goto finish_express_del;
2242 if (chk_flags & SCTP_DATA_UNORDERED) {
2243 /* queue directly into socket buffer */
2244 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2246 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2247 sctp_add_to_readq(stcb->sctp_ep, stcb,
2249 &stcb->sctp_socket->so_rcv, 1,
2250 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2253 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2255 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2263 goto finish_express_del;
2265 /* If we reach here its a reassembly */
2266 need_reasm_check = 1;
2267 SCTPDBG(SCTP_DEBUG_XXX,
2268 "Queue data to stream for reasm control: %p MID: %u\n",
2270 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2273 * the assoc is now gone and chk was put onto the reasm
2274 * queue, which has all been freed.
2282 /* Here we tidy up things */
2283 if (tsn == (asoc->cumulative_tsn + 1)) {
2284 /* Update cum-ack */
2285 asoc->cumulative_tsn = tsn;
2291 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2293 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2295 SCTP_STAT_INCR(sctps_recvdata);
2296 /* Set it present please */
2297 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2298 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2301 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2302 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2304 if (need_reasm_check) {
2305 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2306 need_reasm_check = 0;
2308 /* check the special flag for stream resets */
2309 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2310 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2312 * we have finished working through the backlogged TSN's now
2313 * time to reset streams. 1: call reset function. 2: free
2314 * pending_reply space 3: distribute any chunks in
2315 * pending_reply_queue.
2317 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2318 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2319 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2320 SCTP_FREE(liste, SCTP_M_STRESET);
2321 /* sa_ignore FREED_MEMORY */
2322 liste = TAILQ_FIRST(&asoc->resetHead);
2323 if (TAILQ_EMPTY(&asoc->resetHead)) {
2324 /* All can be removed */
2325 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2326 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2327 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2331 if (need_reasm_check) {
2332 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2333 need_reasm_check = 0;
2337 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2338 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2342 * if control->sinfo_tsn is <= liste->tsn we
2343 * can process it which is the NOT of
2344 * control->sinfo_tsn > liste->tsn
2346 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2347 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2351 if (need_reasm_check) {
2352 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2353 need_reasm_check = 0;
/*
 * Lookup table used when scanning the OR of the mapping/nr_mapping arrays
 * for the cumulative-ack point: indexed by one byte of the map, the entry
 * is the number of consecutive 1-bits starting at bit 0 (i.e. the index of
 * the lowest clear bit), and 8 when all eight bits are set.  Verified by
 * the data itself: tab[0]=0, tab[1]=1, tab[3]=2, ..., tab[255]=8.  The
 * caller adds this to its running bit position ("at") per scanned byte.
 * NOTE(review): the table's closing brace is not visible in this extract.
 */
2361 static const int8_t sctp_map_lookup_tab[256] = {
2362 0, 1, 0, 2, 0, 1, 0, 3,
2363 0, 1, 0, 2, 0, 1, 0, 4,
2364 0, 1, 0, 2, 0, 1, 0, 3,
2365 0, 1, 0, 2, 0, 1, 0, 5,
2366 0, 1, 0, 2, 0, 1, 0, 3,
2367 0, 1, 0, 2, 0, 1, 0, 4,
2368 0, 1, 0, 2, 0, 1, 0, 3,
2369 0, 1, 0, 2, 0, 1, 0, 6,
2370 0, 1, 0, 2, 0, 1, 0, 3,
2371 0, 1, 0, 2, 0, 1, 0, 4,
2372 0, 1, 0, 2, 0, 1, 0, 3,
2373 0, 1, 0, 2, 0, 1, 0, 5,
2374 0, 1, 0, 2, 0, 1, 0, 3,
2375 0, 1, 0, 2, 0, 1, 0, 4,
2376 0, 1, 0, 2, 0, 1, 0, 3,
2377 0, 1, 0, 2, 0, 1, 0, 7,
2378 0, 1, 0, 2, 0, 1, 0, 3,
2379 0, 1, 0, 2, 0, 1, 0, 4,
2380 0, 1, 0, 2, 0, 1, 0, 3,
2381 0, 1, 0, 2, 0, 1, 0, 5,
2382 0, 1, 0, 2, 0, 1, 0, 3,
2383 0, 1, 0, 2, 0, 1, 0, 4,
2384 0, 1, 0, 2, 0, 1, 0, 3,
2385 0, 1, 0, 2, 0, 1, 0, 6,
2386 0, 1, 0, 2, 0, 1, 0, 3,
2387 0, 1, 0, 2, 0, 1, 0, 4,
2388 0, 1, 0, 2, 0, 1, 0, 3,
2389 0, 1, 0, 2, 0, 1, 0, 5,
2390 0, 1, 0, 2, 0, 1, 0, 3,
2391 0, 1, 0, 2, 0, 1, 0, 4,
2392 0, 1, 0, 2, 0, 1, 0, 3,
2393 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Recompute the cumulative TSN from the combined (OR'ed) mapping and
 * nr_mapping arrays, sanity-check it against the highest TSNs seen, and
 * then either clear the arrays outright (everything acked) or slide them
 * down so mapping_array_base_tsn advances past fully-set bytes.
 * NOTE(review): the return-type line and several brace/#ifdef/else lines
 * are not visible in this extract; comments describe only what the
 * visible statements show.
 */
2398 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2401 * Now we also need to check the mapping array in a couple of ways.
2402 * 1) Did we move the cum-ack point?
2404 * When you first glance at this you might think that all entries
2405 * that make up the position of the cum-ack would be in the
2406 * nr-mapping array only.. i.e. things up to the cum-ack are always
2407 * deliverable. Thats true with one exception, when its a fragmented
2408 * message we may not deliver the data until some threshold (or all
2409 * of it) is in place. So we must OR the nr_mapping_array and
2410 * mapping_array to get a true picture of the cum-ack.
2412 struct sctp_association *asoc;
2415 int slide_from, slide_end, lgap, distance;
2416 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for map logging below. */
2420 old_cumack = asoc->cumulative_tsn;
2421 old_base = asoc->mapping_array_base_tsn;
2422 old_highest = asoc->highest_tsn_inside_map;
2424 * We could probably improve this a small bit by calculating the
2425 * offset of the current cum-ack as the starting point.
/*
 * Byte-wise scan of the OR of both maps for the first byte that is not
 * all-ones; sctp_map_lookup_tab then gives the bit offset of the first
 * clear bit within that byte.
 */
2428 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2429 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2433 /* there is a 0 bit */
2434 at += sctp_map_lookup_tab[val];
/* "at" is now the count of contiguous acked TSNs past the array base. */
2438 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: cum-ack must not exceed both highest-TSN trackers.
 * INVARIANTS kernels panic; otherwise log, dump the map, and clamp the
 * trackers to the computed cum-ack.  (The #ifdef lines are not visible
 * in this extract.)
 */
2440 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2441 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2443 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2444 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2446 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2447 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2448 sctp_print_mapping_array(asoc);
2449 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2450 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2452 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2453 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the two per-map highest-TSN trackers. */
2456 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2457 highest_tsn = asoc->highest_tsn_inside_nr_map;
2459 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Fast path: everything up to the highest TSN is acked — wipe the used
 * prefix of both arrays and re-base them just past the cum-ack.
 */
2461 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2462 /* The complete array was completed by a single FR */
2463 /* highest becomes the cum-ack */
2469 /* clear the array */
2470 clr = ((at + 7) >> 3);
2471 if (clr > asoc->mapping_array_size) {
2472 clr = asoc->mapping_array_size;
2474 memset(asoc->mapping_array, 0, clr);
2475 memset(asoc->nr_mapping_array, 0, clr);
/* Debug pass (presumably under INVARIANTS): arrays must be clean now. */
2477 for (i = 0; i < asoc->mapping_array_size; i++) {
2478 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2479 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2480 sctp_print_mapping_array(asoc);
2484 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2485 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2486 } else if (at >= 8) {
2487 /* we can slide the mapping array down */
2488 /* slide_from holds where we hit the first NON 0xff byte */
2491 * now calculate the ceiling of the move using our highest
/* lgap = bit distance of highest_tsn from the array base; slide_end = its byte. */
2494 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2495 slide_end = (lgap >> 3);
2496 if (slide_end < slide_from) {
2497 sctp_print_mapping_array(asoc);
2499 panic("impossible slide");
2501 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2502 lgap, slide_end, slide_from, at);
2506 if (slide_end > asoc->mapping_array_size) {
2508 panic("would overrun buffer");
2510 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2511 asoc->mapping_array_size, slide_end);
2512 slide_end = asoc->mapping_array_size;
2515 distance = (slide_end - slide_from) + 1;
2516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2517 sctp_log_map(old_base, old_cumack, old_highest,
2518 SCTP_MAP_PREPARE_SLIDE);
2519 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2520 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
/*
 * If the live window would not fit after the move, skip the slide and
 * wait for more data to fill the gap (just log the decision).
 */
2522 if (distance + slide_from > asoc->mapping_array_size ||
2525 * Here we do NOT slide forward the array so that
2526 * hopefully when more data comes in to fill it up
2527 * we will be able to slide it forward. Really I
2528 * don't think this should happen :-0
2531 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2532 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2533 (uint32_t)asoc->mapping_array_size,
2534 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down to the front of both arrays... */
2539 for (ii = 0; ii < distance; ii++) {
2540 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2541 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ...and zero the vacated tail. */
2544 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2545 asoc->mapping_array[ii] = 0;
2546 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "highest == base - 1" (empty map) relationship intact for
 * either tracker before advancing the base by slide_from bytes (8 TSNs
 * per byte, hence << 3).
 */
2548 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2549 asoc->highest_tsn_inside_map += (slide_from << 3);
2551 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2552 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2554 asoc->mapping_array_base_tsn += (slide_from << 3);
2555 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2556 sctp_log_map(asoc->mapping_array_base_tsn,
2557 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2558 SCTP_MAP_SLIDE_RESULT);
2565 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2567 struct sctp_association *asoc;
2568 uint32_t highest_tsn;
2571 sctp_slide_mapping_arrays(stcb);
2573 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2574 highest_tsn = asoc->highest_tsn_inside_nr_map;
2576 highest_tsn = asoc->highest_tsn_inside_map;
2578 /* Is there a gap now? */
2579 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2582 * Now we need to see if we need to queue a sack or just start the
2583 * timer (if allowed).
2585 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2587 * Ok special case, in SHUTDOWN-SENT case. here we maker
2588 * sure SACK timer is off and instead send a SHUTDOWN and a
2591 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2592 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2593 stcb->sctp_ep, stcb, NULL,
2594 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2596 sctp_send_shutdown(stcb,
2597 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2599 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2603 * CMT DAC algorithm: increase number of packets received
2606 stcb->asoc.cmt_dac_pkts_rcvd++;
2608 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2610 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2612 (stcb->asoc.numduptsns) || /* we have dup's */
2613 (is_a_gap) || /* is still a gap */
2614 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2615 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2618 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2619 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2620 (stcb->asoc.send_sack == 0) &&
2621 (stcb->asoc.numduptsns == 0) &&
2622 (stcb->asoc.delayed_ack) &&
2623 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2626 * CMT DAC algorithm: With CMT, delay acks
2627 * even in the face of
2629 * reordering. Therefore, if acks that do
2630 * not have to be sent because of the above
2631 * reasons, will be delayed. That is, acks
2632 * that would have been sent due to gap
2633 * reports will be delayed with DAC. Start
2634 * the delayed ack timer.
2636 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2637 stcb->sctp_ep, stcb, NULL);
2640 * Ok we must build a SACK since the timer
2641 * is pending, we got our first packet OR
2642 * there are gaps or duplicates.
2644 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2645 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2648 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2649 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2650 stcb->sctp_ep, stcb, NULL);
2657 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2658 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2659 struct sctp_nets *net, uint32_t *high_tsn)
2661 struct sctp_chunkhdr *ch, chunk_buf;
2662 struct sctp_association *asoc;
2663 int num_chunks = 0; /* number of control chunks processed */
2665 int break_flag, last_chunk;
2666 int abort_flag = 0, was_a_gap;
2668 uint32_t highest_tsn;
2669 uint16_t chk_length;
2672 sctp_set_rwnd(stcb, &stcb->asoc);
2675 SCTP_TCB_LOCK_ASSERT(stcb);
2677 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2678 highest_tsn = asoc->highest_tsn_inside_nr_map;
2680 highest_tsn = asoc->highest_tsn_inside_map;
2682 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2684 * setup where we got the last DATA packet from for any SACK that
2685 * may need to go out. Don't bump the net. This is done ONLY when a
2686 * chunk is assigned.
2688 asoc->last_data_chunk_from = net;
2691 * Now before we proceed we must figure out if this is a wasted
2692 * cluster... i.e. it is a small packet sent in and yet the driver
2693 * underneath allocated a full cluster for it. If so we must copy it
2694 * to a smaller mbuf and free up the cluster mbuf. This will help
2695 * with cluster starvation. Note for __Panda__ we don't do this
2696 * since it has clusters all the way down to 64 bytes.
2698 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2699 /* we only handle mbufs that are singletons.. not chains */
2700 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2702 /* ok lets see if we can copy the data up */
2705 /* get the pointers and copy */
2706 to = mtod(m, caddr_t *);
2707 from = mtod((*mm), caddr_t *);
2708 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2709 /* copy the length and free up the old */
2710 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2712 /* success, back copy */
2715 /* We are in trouble in the mbuf world .. yikes */
2719 /* get pointer to the first chunk header */
2720 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2721 sizeof(struct sctp_chunkhdr),
2722 (uint8_t *)&chunk_buf);
2727 * process all DATA chunks...
2729 *high_tsn = asoc->cumulative_tsn;
2731 asoc->data_pkts_seen++;
2732 while (stop_proc == 0) {
2733 /* validate chunk length */
2734 chk_length = ntohs(ch->chunk_length);
2735 if (length - *offset < chk_length) {
2736 /* all done, mutulated chunk */
2740 if ((asoc->idata_supported == 1) &&
2741 (ch->chunk_type == SCTP_DATA)) {
2742 struct mbuf *op_err;
2743 char msg[SCTP_DIAG_INFO_LEN];
2745 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2746 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2747 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2748 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2751 if ((asoc->idata_supported == 0) &&
2752 (ch->chunk_type == SCTP_IDATA)) {
2753 struct mbuf *op_err;
2754 char msg[SCTP_DIAG_INFO_LEN];
2756 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2757 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2758 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2759 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2762 if ((ch->chunk_type == SCTP_DATA) ||
2763 (ch->chunk_type == SCTP_IDATA)) {
2766 if (ch->chunk_type == SCTP_DATA) {
2767 clen = sizeof(struct sctp_data_chunk);
2769 clen = sizeof(struct sctp_idata_chunk);
2771 if (chk_length < clen) {
2773 * Need to send an abort since we had a
2774 * invalid data chunk.
2776 struct mbuf *op_err;
2777 char msg[SCTP_DIAG_INFO_LEN];
2779 snprintf(msg, sizeof(msg), "%s chunk of length %u",
2780 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2782 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2783 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2784 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2787 #ifdef SCTP_AUDITING_ENABLED
2788 sctp_audit_log(0xB1, 0);
2790 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2795 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2796 chk_length, net, high_tsn, &abort_flag, &break_flag,
2797 last_chunk, ch->chunk_type)) {
2805 * Set because of out of rwnd space and no
2806 * drop rep space left.
2812 /* not a data chunk in the data region */
2813 switch (ch->chunk_type) {
2814 case SCTP_INITIATION:
2815 case SCTP_INITIATION_ACK:
2816 case SCTP_SELECTIVE_ACK:
2817 case SCTP_NR_SELECTIVE_ACK:
2818 case SCTP_HEARTBEAT_REQUEST:
2819 case SCTP_HEARTBEAT_ACK:
2820 case SCTP_ABORT_ASSOCIATION:
2822 case SCTP_SHUTDOWN_ACK:
2823 case SCTP_OPERATION_ERROR:
2824 case SCTP_COOKIE_ECHO:
2825 case SCTP_COOKIE_ACK:
2828 case SCTP_SHUTDOWN_COMPLETE:
2829 case SCTP_AUTHENTICATION:
2830 case SCTP_ASCONF_ACK:
2831 case SCTP_PACKET_DROPPED:
2832 case SCTP_STREAM_RESET:
2833 case SCTP_FORWARD_CUM_TSN:
2837 * Now, what do we do with KNOWN
2838 * chunks that are NOT in the right
2841 * For now, I do nothing but ignore
2842 * them. We may later want to add
2843 * sysctl stuff to switch out and do
2844 * either an ABORT() or possibly
2847 struct mbuf *op_err;
2848 char msg[SCTP_DIAG_INFO_LEN];
2850 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2852 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2853 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2858 * Unknown chunk type: use bit rules after
2861 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2863 * Need to send an abort since we
2864 * had a invalid chunk.
2866 struct mbuf *op_err;
2867 char msg[SCTP_DIAG_INFO_LEN];
2869 snprintf(msg, sizeof(msg), "Chunk of length %u",
2871 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2872 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2873 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2876 if (ch->chunk_type & 0x40) {
2877 /* Add a error report to the queue */
2878 struct mbuf *op_err;
2879 struct sctp_gen_error_cause *cause;
2881 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2882 0, M_NOWAIT, 1, MT_DATA);
2883 if (op_err != NULL) {
2884 cause = mtod(op_err, struct sctp_gen_error_cause *);
2885 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2886 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2887 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2888 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2889 if (SCTP_BUF_NEXT(op_err) != NULL) {
2890 sctp_queue_op_err(stcb, op_err);
2892 sctp_m_freem(op_err);
2896 if ((ch->chunk_type & 0x80) == 0) {
2897 /* discard the rest of this packet */
2899 } /* else skip this bad chunk and
2902 } /* switch of chunk type */
2904 *offset += SCTP_SIZE32(chk_length);
2905 if ((*offset >= length) || stop_proc) {
2906 /* no more data left in the mbuf chain */
2910 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2911 sizeof(struct sctp_chunkhdr),
2912 (uint8_t *)&chunk_buf);
2921 * we need to report rwnd overrun drops.
2923 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2927 * Did we get data, if so update the time for auto-close and
2928 * give peer credit for being alive.
2930 SCTP_STAT_INCR(sctps_recvpktwithdata);
2931 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2932 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2933 stcb->asoc.overall_error_count,
2935 SCTP_FROM_SCTP_INDATA,
2938 stcb->asoc.overall_error_count = 0;
2939 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2941 /* now service all of the reassm queue if needed */
2942 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2943 /* Assure that we ack right away */
2944 stcb->asoc.send_sack = 1;
2946 /* Start a sack timer or QUEUE a SACK for sending */
2947 sctp_sack_check(stcb, was_a_gap);
/*
 * Process one gap-ack block from a SACK: for each TSN in
 * [last_tsn + frag_strt, last_tsn + frag_end], find the matching chunk on
 * the sent queue and mark it acked (MARKED, or NR_ACKED for nr-sacks).
 * Maintains CMT (rtx-)pseudo-cumack trackers, SFR/DAC newack state,
 * flight-size accounting, RTO sampling, and per-stream queue counts.
 * *p_tp1 carries the scan position between calls.  Returns non-zero
 * ("wake_him") when an nr-sack freed chunk data so the socket can be
 * woken.
 * NOTE(review): the return-type line and several brace/else/#ifdef lines
 * are not visible in this extract.
 */
2952 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2953 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2955 uint32_t *biggest_newly_acked_tsn,
2956 uint32_t *this_sack_lowest_newack,
2959 struct sctp_tmit_chunk *tp1;
2960 unsigned int theTSN;
2961 int j, wake_him = 0, circled = 0;
2963 /* Recover the tp1 we last saw */
2966 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2968 for (j = frag_strt; j <= frag_end; j++) {
/* Gap offsets are relative to the SACK's cumulative TSN (last_tsn). */
2969 theTSN = j + last_tsn;
2971 if (tp1->rec.data.doing_fast_retransmit)
2975 * CMT: CUCv2 algorithm. For each TSN being
2976 * processed from the sent queue, track the
2977 * next expected pseudo-cumack, or
2978 * rtx_pseudo_cumack, if required. Separate
2979 * cumack trackers for first transmissions,
2980 * and retransmissions.
2982 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2983 (tp1->whoTo->find_pseudo_cumack == 1) &&
2984 (tp1->snd_count == 1)) {
2985 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2986 tp1->whoTo->find_pseudo_cumack = 0;
2988 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2989 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2990 (tp1->snd_count > 1)) {
2991 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2992 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk for this TSN. */
2994 if (tp1->rec.data.tsn == theTSN) {
2995 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2997 * must be held until
3000 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3002 * If it is less than RESEND, it is
3003 * now no-longer in flight.
3004 * Higher values may already be set
3005 * via previous Gap Ack Blocks...
3006 * i.e. ACKED or RESEND.
3008 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3009 *biggest_newly_acked_tsn)) {
3010 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3013 * CMT: SFR algo (and HTNA) - set
3014 * saw_newack to 1 for dest being
3015 * newly acked. update
3016 * this_sack_highest_newack if
3019 if (tp1->rec.data.chunk_was_revoked == 0)
3020 tp1->whoTo->saw_newack = 1;
3022 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3023 tp1->whoTo->this_sack_highest_newack)) {
3024 tp1->whoTo->this_sack_highest_newack =
3028 * CMT DAC algo: also update
3029 * this_sack_lowest_newack
3031 if (*this_sack_lowest_newack == 0) {
3032 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3033 sctp_log_sack(*this_sack_lowest_newack,
3038 SCTP_LOG_TSN_ACKED);
3040 *this_sack_lowest_newack = tp1->rec.data.tsn;
3043 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3044 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3045 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3046 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3047 * Separate pseudo_cumack trackers for first transmissions and
3050 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3051 if (tp1->rec.data.chunk_was_revoked == 0) {
3052 tp1->whoTo->new_pseudo_cumack = 1;
3054 tp1->whoTo->find_pseudo_cumack = 1;
3056 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3057 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3059 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3060 if (tp1->rec.data.chunk_was_revoked == 0) {
3061 tp1->whoTo->new_pseudo_cumack = 1;
3063 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3065 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3066 sctp_log_sack(*biggest_newly_acked_tsn,
3071 SCTP_LOG_TSN_ACKED);
3073 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3074 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3075 tp1->whoTo->flight_size,
3077 (uint32_t)(uintptr_t)tp1->whoTo,
/* The chunk leaves the network: shrink per-path and total flight. */
3080 sctp_flight_size_decrease(tp1);
3081 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3082 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3085 sctp_total_flight_decrease(stcb, tp1);
3087 tp1->whoTo->net_ack += tp1->send_size;
3088 if (tp1->snd_count < 2) {
3090 * True non-retransmitted chunk
3092 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample from a (presumably first-transmit) chunk's send time. */
3100 sctp_calculate_rto(stcb,
3103 &tp1->sent_rcv_time,
3104 SCTP_RTT_FROM_DATA);
3107 if (tp1->whoTo->rto_needed == 0) {
3108 tp1->whoTo->rto_needed = 1;
3115 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3116 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3117 stcb->asoc.this_sack_highest_gap)) {
3118 stcb->asoc.this_sack_highest_gap =
/* A gap-acked chunk that was queued for retransmit no longer needs it. */
3121 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3122 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3123 #ifdef SCTP_AUDITING_ENABLED
3124 sctp_audit_log(0xB2,
3125 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3130 * All chunks NOT UNSENT fall through here and are marked
3131 * (leave PR-SCTP ones that are to skip alone though)
3133 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3134 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3135 tp1->sent = SCTP_DATAGRAM_MARKED;
3137 if (tp1->rec.data.chunk_was_revoked) {
3138 /* deflate the cwnd */
3139 tp1->whoTo->cwnd -= tp1->book_size;
3140 tp1->rec.data.chunk_was_revoked = 0;
3142 /* NR Sack code here */
3144 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* nr-sacked chunks are final: fix stream counts, maybe trigger reset. */
3145 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3146 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3149 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3152 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3153 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3154 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3155 stcb->asoc.trigger_reset = 1;
3157 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* Data is non-renegable now — release the mbufs and wake the writer. */
3163 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3164 sctp_m_freem(tp1->data);
3171 } /* if (tp1->tsn == theTSN) */
/* Sent queue is TSN-ordered: once past theTSN, stop scanning for it. */
3172 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3175 tp1 = TAILQ_NEXT(tp1, sctp_next);
3176 if ((tp1 == NULL) && (circled == 0)) {
/* Wrap around once in case the blocks arrived out of order. */
3178 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3180 } /* end while (tp1) */
3183 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3185 /* In case the fragments were not in order we must reset */
3186 } /* end for (j = fragStart */
3188 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over the num_seg renegable and num_nr_seg non-renegable gap-ack
 * blocks carried in a SACK chunk (read from the mbuf at *offset), validate
 * each block (start <= end, ordering vs. the previous block), and hand it
 * to sctp_process_segment_range().  Tracks *biggest_tsn_acked and returns
 * non-zero when chunk data was freed (nr-sack case).
 * NOTE(review): the return-type line and some brace/else lines are not
 * visible in this extract; the i >= num_seg split selecting non_revocable
 * is among the missing lines — confirm against the full source.
 */
3193 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3194 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3195 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3196 int num_seg, int num_nr_seg, int *rto_ok)
3198 struct sctp_gap_ack_block *frag, block;
3199 struct sctp_tmit_chunk *tp1;
3204 uint16_t frag_strt, frag_end, prev_frag_end;
3206 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3210 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3213 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next 4-byte gap block out of the SACK; bail if truncated. */
3215 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3216 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3217 *offset += sizeof(block);
3219 return (chunk_freed);
3221 frag_strt = ntohs(frag->start);
3222 frag_end = ntohs(frag->end);
3224 if (frag_strt > frag_end) {
3225 /* This gap report is malformed, skip it. */
3228 if (frag_strt <= prev_frag_end) {
3229 /* This gap report is not in order, so restart. */
3230 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3232 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3233 *biggest_tsn_acked = last_tsn + frag_end;
3240 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3241 non_revocable, &num_frs, biggest_newly_acked_tsn,
3242 this_sack_lowest_newack, rto_ok)) {
3245 prev_frag_end = frag_end;
3247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3249 sctp_log_fr(*biggest_tsn_acked,
3250 *biggest_newly_acked_tsn,
3251 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3253 return (chunk_freed);
/*
 * Detect SACK renege: walk the sent queue above the cumulative ack.  A
 * chunk previously gap-ACKED but not covered by this SACK has been
 * revoked — put it back to SENT, re-add it to the flight, and inflate
 * cwnd to compensate.  A chunk MARKED by this SACK's gap blocks is
 * promoted to ACKED.
 * NOTE(review): the return-type line and the function's closing lines
 * are not visible in this extract.
 */
3257 sctp_check_for_revoked(struct sctp_tcb *stcb,
3258 struct sctp_association *asoc, uint32_t cumack,
3259 uint32_t biggest_tsn_acked)
3261 struct sctp_tmit_chunk *tp1;
3263 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3264 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3266 * ok this guy is either ACK or MARKED. If it is
3267 * ACKED it has been previously acked but not this
3268 * time i.e. revoked. If it is MARKED it was ACK'ed
3271 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3274 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3275 /* it has been revoked */
3276 tp1->sent = SCTP_DATAGRAM_SENT;
3277 tp1->rec.data.chunk_was_revoked = 1;
3279 * We must add this stuff back in to assure
3280 * timers and such get started.
3282 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3283 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3284 tp1->whoTo->flight_size,
3286 (uint32_t)(uintptr_t)tp1->whoTo,
3289 sctp_flight_size_increase(tp1);
3290 sctp_total_flight_increase(stcb, tp1);
3292 * We inflate the cwnd to compensate for our
3293 * artificial inflation of the flight_size.
3295 tp1->whoTo->cwnd += tp1->book_size;
3296 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3297 sctp_log_sack(asoc->last_acked_seq,
3302 SCTP_LOG_TSN_REVOKED);
3304 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3305 /* it has been re-acked in this SACK */
3306 tp1->sent = SCTP_DATAGRAM_ACKED;
/* UNSENT chunks mark the end of transmitted data on the queue. */
3309 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3316 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3317 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3319 struct sctp_tmit_chunk *tp1;
3320 int strike_flag = 0;
3322 int tot_retrans = 0;
3323 uint32_t sending_seq;
3324 struct sctp_nets *net;
3325 int num_dests_sacked = 0;
3328 * select the sending_seq, this is either the next thing ready to be
3329 * sent but not transmitted, OR, the next seq we assign.
3331 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3333 sending_seq = asoc->sending_seq;
3335 sending_seq = tp1->rec.data.tsn;
3338 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3339 if ((asoc->sctp_cmt_on_off > 0) &&
3340 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3341 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3342 if (net->saw_newack)
3346 if (stcb->asoc.prsctp_supported) {
3347 (void)SCTP_GETTIME_TIMEVAL(&now);
3349 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3351 if (tp1->no_fr_allowed) {
3352 /* this one had a timeout or something */
3355 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3356 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3357 sctp_log_fr(biggest_tsn_newly_acked,
3360 SCTP_FR_LOG_CHECK_STRIKE);
3362 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3363 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3367 if (stcb->asoc.prsctp_supported) {
3368 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3369 /* Is it expired? */
3370 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3371 /* Yes so drop it */
3372 if (tp1->data != NULL) {
3373 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3374 SCTP_SO_NOT_LOCKED);
3381 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3382 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3383 /* we are beyond the tsn in the sack */
3386 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3387 /* either a RESEND, ACKED, or MARKED */
3389 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3390 /* Continue strikin FWD-TSN chunks */
3391 tp1->rec.data.fwd_tsn_cnt++;
3396 * CMT : SFR algo (covers part of DAC and HTNA as well)
3398 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3400 * No new acks were receieved for data sent to this
3401 * dest. Therefore, according to the SFR algo for
3402 * CMT, no data sent to this dest can be marked for
3403 * FR using this SACK.
3406 } else if (tp1->whoTo &&
3407 SCTP_TSN_GT(tp1->rec.data.tsn,
3408 tp1->whoTo->this_sack_highest_newack) &&
3409 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3411 * CMT: New acks were receieved for data sent to
3412 * this dest. But no new acks were seen for data
3413 * sent after tp1. Therefore, according to the SFR
3414 * algo for CMT, tp1 cannot be marked for FR using
3415 * this SACK. This step covers part of the DAC algo
3416 * and the HTNA algo as well.
3421 * Here we check to see if we were have already done a FR
3422 * and if so we see if the biggest TSN we saw in the sack is
3423 * smaller than the recovery point. If so we don't strike
3424 * the tsn... otherwise we CAN strike the TSN.
3427 * @@@ JRI: Check for CMT if (accum_moved &&
3428 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3431 if (accum_moved && asoc->fast_retran_loss_recovery) {
3433 * Strike the TSN if in fast-recovery and cum-ack
3436 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3437 sctp_log_fr(biggest_tsn_newly_acked,
3440 SCTP_FR_LOG_STRIKE_CHUNK);
3442 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3445 if ((asoc->sctp_cmt_on_off > 0) &&
3446 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3448 * CMT DAC algorithm: If SACK flag is set to
3449 * 0, then lowest_newack test will not pass
3450 * because it would have been set to the
3451 * cumack earlier. If not already to be
3452 * rtx'd, If not a mixed sack and if tp1 is
3453 * not between two sacked TSNs, then mark by
3454 * one more. NOTE that we are marking by one
3455 * additional time since the SACK DAC flag
3456 * indicates that two packets have been
3457 * received after this missing TSN.
3459 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3460 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3461 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3462 sctp_log_fr(16 + num_dests_sacked,
3465 SCTP_FR_LOG_STRIKE_CHUNK);
3470 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3471 (asoc->sctp_cmt_on_off == 0)) {
3473 * For those that have done a FR we must take
3474 * special consideration if we strike. I.e the
3475 * biggest_newly_acked must be higher than the
3476 * sending_seq at the time we did the FR.
3479 #ifdef SCTP_FR_TO_ALTERNATE
3481 * If FR's go to new networks, then we must only do
3482 * this for singly homed asoc's. However if the FR's
3483 * go to the same network (Armando's work) then its
3484 * ok to FR multiple times.
3492 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3493 tp1->rec.data.fast_retran_tsn)) {
3495 * Strike the TSN, since this ack is
3496 * beyond where things were when we
3499 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3500 sctp_log_fr(biggest_tsn_newly_acked,
3503 SCTP_FR_LOG_STRIKE_CHUNK);
3505 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3509 if ((asoc->sctp_cmt_on_off > 0) &&
3510 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3512 * CMT DAC algorithm: If
3513 * SACK flag is set to 0,
3514 * then lowest_newack test
3515 * will not pass because it
3516 * would have been set to
3517 * the cumack earlier. If
3518 * not already to be rtx'd,
3519 * If not a mixed sack and
3520 * if tp1 is not between two
3521 * sacked TSNs, then mark by
3522 * one more. NOTE that we
3523 * are marking by one
3524 * additional time since the
3525 * SACK DAC flag indicates
3526 * that two packets have
3527 * been received after this
3530 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3531 (num_dests_sacked == 1) &&
3532 SCTP_TSN_GT(this_sack_lowest_newack,
3533 tp1->rec.data.tsn)) {
3534 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3535 sctp_log_fr(32 + num_dests_sacked,
3538 SCTP_FR_LOG_STRIKE_CHUNK);
3540 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3548 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3551 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3552 biggest_tsn_newly_acked)) {
3554 * We don't strike these: This is the HTNA
3555 * algorithm i.e. we don't strike If our TSN is
3556 * larger than the Highest TSN Newly Acked.
3560 /* Strike the TSN */
3561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3562 sctp_log_fr(biggest_tsn_newly_acked,
3565 SCTP_FR_LOG_STRIKE_CHUNK);
3567 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3570 if ((asoc->sctp_cmt_on_off > 0) &&
3571 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3573 * CMT DAC algorithm: If SACK flag is set to
3574 * 0, then lowest_newack test will not pass
3575 * because it would have been set to the
3576 * cumack earlier. If not already to be
3577 * rtx'd, If not a mixed sack and if tp1 is
3578 * not between two sacked TSNs, then mark by
3579 * one more. NOTE that we are marking by one
3580 * additional time since the SACK DAC flag
3581 * indicates that two packets have been
3582 * received after this missing TSN.
3584 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3585 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3587 sctp_log_fr(48 + num_dests_sacked,
3590 SCTP_FR_LOG_STRIKE_CHUNK);
3596 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3597 struct sctp_nets *alt;
3599 /* fix counts and things */
3600 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3601 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3602 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3604 (uint32_t)(uintptr_t)tp1->whoTo,
3608 tp1->whoTo->net_ack++;
3609 sctp_flight_size_decrease(tp1);
3610 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3611 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3616 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3617 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3618 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3620 /* add back to the rwnd */
3621 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3623 /* remove from the total flight */
3624 sctp_total_flight_decrease(stcb, tp1);
3626 if ((stcb->asoc.prsctp_supported) &&
3627 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3629 * Has it been retransmitted tv_sec times? -
3630 * we store the retran count there.
3632 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3633 /* Yes, so drop it */
3634 if (tp1->data != NULL) {
3635 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3636 SCTP_SO_NOT_LOCKED);
3638 /* Make sure to flag we had a FR */
3639 if (tp1->whoTo != NULL) {
3640 tp1->whoTo->net_ack++;
3646 * SCTP_PRINTF("OK, we are now ready to FR this
3649 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3650 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3654 /* This is a subsequent FR */
3655 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3657 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3658 if (asoc->sctp_cmt_on_off > 0) {
3660 * CMT: Using RTX_SSTHRESH policy for CMT.
3661 * If CMT is being used, then pick dest with
3662 * largest ssthresh for any retransmission.
3664 tp1->no_fr_allowed = 1;
3666 /* sa_ignore NO_NULL_CHK */
3667 if (asoc->sctp_cmt_pf > 0) {
3669 * JRS 5/18/07 - If CMT PF is on,
3670 * use the PF version of
3673 alt = sctp_find_alternate_net(stcb, alt, 2);
3676 * JRS 5/18/07 - If only CMT is on,
3677 * use the CMT version of
3680 /* sa_ignore NO_NULL_CHK */
3681 alt = sctp_find_alternate_net(stcb, alt, 1);
3687 * CUCv2: If a different dest is picked for
3688 * the retransmission, then new
3689 * (rtx-)pseudo_cumack needs to be tracked
3690 * for orig dest. Let CUCv2 track new (rtx-)
3691 * pseudo-cumack always.
3694 tp1->whoTo->find_pseudo_cumack = 1;
3695 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3698 } else { /* CMT is OFF */
3700 #ifdef SCTP_FR_TO_ALTERNATE
3701 /* Can we find an alternate? */
3702 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3705 * default behavior is to NOT retransmit
3706 * FR's to an alternate. Armando Caro's
3707 * paper details why.
3713 tp1->rec.data.doing_fast_retransmit = 1;
3715 /* mark the sending seq for possible subsequent FR's */
3717 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3718 * (uint32_t)tpi->rec.data.tsn);
3720 if (TAILQ_EMPTY(&asoc->send_queue)) {
3722 * If the queue of send is empty then its
3723 * the next sequence number that will be
3724 * assigned so we subtract one from this to
3725 * get the one we last sent.
3727 tp1->rec.data.fast_retran_tsn = sending_seq;
3730 * If there are chunks on the send queue
3731 * (unsent data that has made it from the
3732 * stream queues but not out the door, we
3733 * take the first one (which will have the
3734 * lowest TSN) and subtract one to get the
3737 struct sctp_tmit_chunk *ttt;
3739 ttt = TAILQ_FIRST(&asoc->send_queue);
3740 tp1->rec.data.fast_retran_tsn =
3746 * this guy had a RTO calculation pending on
3749 if ((tp1->whoTo != NULL) &&
3750 (tp1->whoTo->rto_needed == 0)) {
3751 tp1->whoTo->rto_needed = 1;
3755 if (alt != tp1->whoTo) {
3756 /* yes, there is an alternate. */
3757 sctp_free_remote_addr(tp1->whoTo);
3758 /* sa_ignore FREED_MEMORY */
3760 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP "C2" procedure: walk the sent queue from its head and try to
 * move asoc->advanced_peer_ack_point forward over chunks that are
 * marked SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED.  A chunk that
 * is still SCTP_DATAGRAM_RESEND may itself be converted to "skip" here
 * when its PR-SCTP TTL has expired (timevalcmp against
 * rec.data.timetodrop).  Returns a chunk pointer (a_adv) the caller
 * uses when building a FWD-TSN, or NULL.
 *
 * NOTE(review): this region of the file is gap-sampled -- the embedded
 * original line numbers below are not consecutive, so several
 * statements (early returns, break/continue, closing braces) are
 * missing from this view.  Comments describe only what the visible
 * lines establish.
 */
3766 struct sctp_tmit_chunk *
3767 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3768 struct sctp_association *asoc)
3770 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP support. */
3774 if (asoc->prsctp_supported == 0) {
3777 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3778 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3779 tp1->sent != SCTP_DATAGRAM_RESEND &&
3780 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3781 /* no chance to advance, out of here */
3784 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3785 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3786 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3787 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3788 asoc->advanced_peer_ack_point,
3789 tp1->rec.data.tsn, 0, 0);
3792 if (!PR_SCTP_ENABLED(tp1->flags)) {
3794 * We can't fwd-tsn past any that are reliable aka
3795 * retransmitted until the asoc fails.
/* Snapshot "now" once for the TTL comparisons below. */
3800 (void)SCTP_GETTIME_TIMEVAL(&now);
3804 * now we got a chunk which is marked for another
3805 * retransmission to a PR-stream but has run out its chances
3806 * already maybe OR has been marked to skip now. Can we skip
3807 * it if its a resend?
3809 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3810 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3812 * Now is this one marked for resend and its time is
3815 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3816 /* Yes so drop it */
3818 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3819 1, SCTP_SO_NOT_LOCKED);
3823 * No, we are done when hit one for resend
3824 * whos time as not expired.
3830 * Ok now if this chunk is marked to drop it we can clean up
3831 * the chunk, advance our peer ack point and we can check
3834 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3835 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3836 /* advance PeerAckPoint goes forward */
3837 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3838 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3840 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3841 /* No update but we do save the chk */
3846 * If it is still in RESEND we can advance no
/*
 * Debug audit of flight-size accounting: classify every chunk on the
 * sent queue by its ->sent state (inflight < RESEND, resend == RESEND,
 * inbetween < ACKED, above > ACKED) and compare against the
 * association-level totals captured in entry_flight/entry_cnt.  Under
 * INVARIANTS-style builds an inconsistency panics; otherwise it is
 * printed.
 *
 * NOTE(review): gap-sampled view -- the return type line, the early
 * return for the all-PR-SCTP case (line 3870's body), the per-state
 * counter increments, and the final return are among the missing
 * lines.  Callers (sctp_express_handle_sack) treat a non-zero result
 * as "audit failed, rebuild flight counts".
 */
3856 sctp_fs_audit(struct sctp_association *asoc)
3858 struct sctp_tmit_chunk *chk;
3859 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3862 int entry_flight, entry_cnt;
/* Remember the totals at entry so a mismatch can be reported. */
3867 entry_flight = asoc->total_flight;
3868 entry_cnt = asoc->total_flight_count;
3870 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3873 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3874 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3875 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3880 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3882 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3884 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3891 if ((inflight > 0) || (inbetween > 0)) {
3893 panic("Flight size-express incorrect? \n");
3895 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3896 entry_flight, entry_cnt);
3898 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3899 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a window probe: clear its
 * window_probe flag and, unless it has already been ACKed/skipped
 * (sent >= SCTP_DATAGRAM_ACKED or its data mbuf is gone), pull it out
 * of the flight-size accounting and mark it SCTP_DATAGRAM_RESEND so it
 * goes back onto the retransmission path.
 *
 * NOTE(review): gap-sampled view -- the return type, the remaining
 * sctp_misc_ints() arguments, and the early-return of the
 * already-acked branch are among the missing lines.
 */
3908 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3909 struct sctp_association *asoc,
3910 struct sctp_tmit_chunk *tp1)
3912 tp1->window_probe = 0;
3913 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3914 /* TSN's skipped we do NOT move back. */
3915 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3916 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3918 (uint32_t)(uintptr_t)tp1->whoTo,
3922 /* First setup this by shrinking flight */
/* Give the CC module a chance to account for the TSN leaving flight. */
3923 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3924 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3927 sctp_flight_size_decrease(tp1);
3928 sctp_total_flight_decrease(stcb, tp1);
3929 /* Now mark for resend */
3930 tp1->sent = SCTP_DATAGRAM_RESEND;
3931 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3934 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3935 tp1->whoTo->flight_size,
3937 (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * Fast-path ("express") SACK handling: used when the SACK carries only
 * a cumulative ack (no gap-ack blocks to walk).  In order it:
 *   1) validates cumack against last_acked_seq / sending_seq and
 *      aborts the association on a protocol violation (cum ack at or
 *      beyond anything we ever sent);
 *   2) frees every sent-queue chunk covered by cumack, updating
 *      flight size, net_ack/net_ack2, RTO samples, CUCv2
 *      pseudo-cumack flags, per-stream chunks_on_queues and the
 *      stream-reset trigger;
 *   3) wakes the sending socket (with the Apple/userland lock dance);
 *   4) runs the CC module hooks, clears error counts (Karn's rule),
 *      handles PF exit and RTO clamping per net;
 *   5) recomputes the peer rwnd, performs window-probe recovery and
 *      (re)starts or stops T3 send timers per net;
 *   6) handles shutdown-pending / shutdown-received transitions; and
 *   7) runs PR-SCTP C1/C3: advance the peer ack point and emit a
 *      FWD-TSN when it moved (or when a FWD-TSN appears lost).
 *
 * NOTE(review): this whole block is gap-sampled -- embedded original
 * line numbers are non-consecutive, so many guards, else-branches,
 * gotos and closing braces are missing from this view.  Comments
 * below only assert what the visible lines establish.
 */
3943 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3944 uint32_t rwnd, int *abort_now, int ecne_seen)
3946 struct sctp_nets *net;
3947 struct sctp_association *asoc;
3948 struct sctp_tmit_chunk *tp1, *tp2;
3950 int win_probe_recovery = 0;
3951 int win_probe_recovered = 0;
3952 int j, done_once = 0;
3956 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3957 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3958 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3960 SCTP_TCB_LOCK_ASSERT(stcb);
3961 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Circular debug log of received cum-acks. */
3962 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3963 stcb->asoc.cumack_log_at++;
3964 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3965 stcb->asoc.cumack_log_at = 0;
3969 old_rwnd = asoc->peers_rwnd;
/* Old SACK (cum-ack behind what we already processed): ignore. */
3970 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3973 } else if (asoc->last_acked_seq == cumack) {
3974 /* Window update sack */
3975 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3976 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3977 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3978 /* SWS sender side engages */
3979 asoc->peers_rwnd = 0;
3981 if (asoc->peers_rwnd > old_rwnd) {
3987 /* First setup for CC stuff */
3988 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3989 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3990 /* Drag along the window_tsn for cwr's */
3991 net->cwr_window_tsn = cumack;
3993 net->prev_cwnd = net->cwnd;
3998 * CMT: Reset CUC and Fast recovery algo variables before
4001 net->new_pseudo_cumack = 0;
4002 net->will_exit_fast_recovery = 0;
4003 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4004 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity check: cumack must lie below the highest TSN ever sent. */
4007 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4008 tp1 = TAILQ_LAST(&asoc->sent_queue,
4009 sctpchunk_listhead);
4010 send_s = tp1->rec.data.tsn + 1;
4012 send_s = asoc->sending_seq;
4014 if (SCTP_TSN_GE(cumack, send_s)) {
4015 struct mbuf *op_err;
4016 char msg[SCTP_DIAG_INFO_LEN];
/* Peer acked data we never sent: protocol violation, abort. */
4020 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4022 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4023 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4024 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4027 asoc->this_sack_highest_gap = cumack;
4028 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4029 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4030 stcb->asoc.overall_error_count,
4032 SCTP_FROM_SCTP_INDATA,
/* Any acceptable SACK resets the association error counter. */
4035 stcb->asoc.overall_error_count = 0;
4036 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4037 /* process the new consecutive TSN first */
4038 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4039 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4040 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4041 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4043 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4045 * If it is less than ACKED, it is
4046 * now no-longer in flight. Higher
4047 * values may occur during marking
4049 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4050 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4051 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4052 tp1->whoTo->flight_size,
4054 (uint32_t)(uintptr_t)tp1->whoTo,
4057 sctp_flight_size_decrease(tp1);
4058 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4059 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4062 /* sa_ignore NO_NULL_CHK */
4063 sctp_total_flight_decrease(stcb, tp1);
4065 tp1->whoTo->net_ack += tp1->send_size;
4066 if (tp1->snd_count < 2) {
4068 * True non-retransmitted
4071 tp1->whoTo->net_ack2 +=
4074 /* update RTO too? */
/* RTT sample only from never-retransmitted chunks (Karn). */
4082 sctp_calculate_rto(stcb,
4084 &tp1->sent_rcv_time,
4085 SCTP_RTT_FROM_DATA);
4088 if (tp1->whoTo->rto_needed == 0) {
4089 tp1->whoTo->rto_needed = 1;
4095 * CMT: CUCv2 algorithm. From the
4096 * cumack'd TSNs, for each TSN being
4097 * acked for the first time, set the
4098 * following variables for the
4099 * corresp destination.
4100 * new_pseudo_cumack will trigger a
4102 * find_(rtx_)pseudo_cumack will
4103 * trigger search for the next
4104 * expected (rtx-)pseudo-cumack.
4106 tp1->whoTo->new_pseudo_cumack = 1;
4107 tp1->whoTo->find_pseudo_cumack = 1;
4108 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4111 /* sa_ignore NO_NULL_CHK */
4112 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4115 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4116 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4118 if (tp1->rec.data.chunk_was_revoked) {
4119 /* deflate the cwnd */
4120 tp1->whoTo->cwnd -= tp1->book_size;
4121 tp1->rec.data.chunk_was_revoked = 0;
/* NR_ACKED chunks already released their stream accounting. */
4123 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4124 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4125 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4128 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4132 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4133 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4134 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4135 asoc->trigger_reset = 1;
4137 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4139 /* sa_ignore NO_NULL_CHK */
4140 sctp_free_bufspace(stcb, asoc, tp1, 1);
4141 sctp_m_freem(tp1->data);
4144 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4145 sctp_log_sack(asoc->last_acked_seq,
4150 SCTP_LOG_FREE_SENT);
4152 asoc->sent_queue_cnt--;
4153 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/*
 * Wake the sender socket.  NOTE(review): the guard that decides
 * whether anything was freed is among the sampled-out lines.
 */
4160 /* sa_ignore NO_NULL_CHK */
4161 if (stcb->sctp_socket) {
4162 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4166 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4167 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4168 /* sa_ignore NO_NULL_CHK */
4169 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4171 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple/userland: drop TCB lock before taking the socket lock. */
4172 so = SCTP_INP_SO(stcb->sctp_ep);
4173 atomic_add_int(&stcb->asoc.refcnt, 1);
4174 SCTP_TCB_UNLOCK(stcb);
4175 SCTP_SOCKET_LOCK(so, 1);
4176 SCTP_TCB_LOCK(stcb);
4177 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4178 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4179 /* assoc was freed while we were unlocked */
4180 SCTP_SOCKET_UNLOCK(so, 1);
4184 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4185 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4186 SCTP_SOCKET_UNLOCK(so, 1);
4189 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4190 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4194 /* JRS - Use the congestion control given in the CC module */
4195 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4196 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4197 if (net->net_ack2 > 0) {
4199 * Karn's rule applies to clearing error
4200 * count, this is optional.
4202 net->error_count = 0;
4203 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4204 /* addr came good */
4205 net->dest_state |= SCTP_ADDR_REACHABLE;
4206 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4207 0, (void *)net, SCTP_SO_NOT_LOCKED);
4209 if (net == stcb->asoc.primary_destination) {
4210 if (stcb->asoc.alternate) {
4212 * release the alternate,
4215 sctp_free_remote_addr(stcb->asoc.alternate);
4216 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart heartbeats. */
4219 if (net->dest_state & SCTP_ADDR_PF) {
4220 net->dest_state &= ~SCTP_ADDR_PF;
4221 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4222 stcb->sctp_ep, stcb, net,
4223 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4224 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4225 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4226 /* Done with this net */
4229 /* restore any doubled timers */
4230 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4231 if (net->RTO < stcb->asoc.minrto) {
4232 net->RTO = stcb->asoc.minrto;
4234 if (net->RTO > stcb->asoc.maxrto) {
4235 net->RTO = stcb->asoc.maxrto;
4239 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4241 asoc->last_acked_seq = cumack;
4243 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4244 /* nothing left in-flight */
4245 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4246 net->flight_size = 0;
4247 net->partial_bytes_acked = 0;
4249 asoc->total_flight = 0;
4250 asoc->total_flight_count = 0;
/* Recompute peer rwnd from the advertised rwnd minus outstanding data. */
4254 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4255 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4256 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4257 /* SWS sender side engages */
4258 asoc->peers_rwnd = 0;
4260 if (asoc->peers_rwnd > old_rwnd) {
4261 win_probe_recovery = 1;
4263 /* Now assure a timer where data is queued at */
4266 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4267 if (win_probe_recovery && (net->window_probe)) {
4268 win_probe_recovered = 1;
4270 * Find first chunk that was used with window probe
4271 * and clear the sent
4273 /* sa_ignore FREED_MEMORY */
4274 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4275 if (tp1->window_probe) {
4276 /* move back to data send queue */
4277 sctp_window_probe_recovery(stcb, asoc, tp1);
4282 if (net->flight_size) {
4284 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4285 if (net->window_probe) {
4286 net->window_probe = 0;
4289 if (net->window_probe) {
4291 * In window probes we must assure a timer
4292 * is still running there
4294 net->window_probe = 0;
4295 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4296 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4298 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4299 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4301 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* NOTE(review): the leading condition of this audit block is sampled out. */
4306 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4307 (asoc->sent_queue_retran_cnt == 0) &&
4308 (win_probe_recovered == 0) &&
4311 * huh, this should not happen unless all packets are
4312 * PR-SCTP and marked to skip of course.
4314 if (sctp_fs_audit(asoc)) {
/* Audit failed: zero and rebuild all flight-size accounting. */
4315 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4316 net->flight_size = 0;
4318 asoc->total_flight = 0;
4319 asoc->total_flight_count = 0;
4320 asoc->sent_queue_retran_cnt = 0;
4321 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4322 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4323 sctp_flight_size_increase(tp1);
4324 sctp_total_flight_increase(stcb, tp1);
4325 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4326 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4333 /**********************************/
4334 /* Now what about shutdown issues */
4335 /**********************************/
4336 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4337 /* nothing left on sendqueue.. consider done */
/* A lone partially-delivered message blocks a clean shutdown. */
4339 if ((asoc->stream_queue_cnt == 1) &&
4340 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4341 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4342 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4343 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4345 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4346 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4347 (asoc->stream_queue_cnt == 1) &&
4348 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4349 struct mbuf *op_err;
4353 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4354 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4355 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* All data delivered and shutdown pending: send SHUTDOWN. */
4358 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4359 (asoc->stream_queue_cnt == 0)) {
4360 struct sctp_nets *netp;
4362 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4363 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4364 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4366 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4367 sctp_stop_timers_for_shutdown(stcb);
4368 if (asoc->alternate) {
4369 netp = asoc->alternate;
4371 netp = asoc->primary_destination;
4373 sctp_send_shutdown(stcb, netp);
4374 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4375 stcb->sctp_ep, stcb, netp);
4376 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4377 stcb->sctp_ep, stcb, netp);
4378 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4379 (asoc->stream_queue_cnt == 0)) {
4380 struct sctp_nets *netp;
4382 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4383 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4384 sctp_stop_timers_for_shutdown(stcb);
4385 if (asoc->alternate) {
4386 netp = asoc->alternate;
4388 netp = asoc->primary_destination;
4390 sctp_send_shutdown_ack(stcb, netp);
4391 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4392 stcb->sctp_ep, stcb, netp);
4395 /*********************************************/
4396 /* Here we perform PR-SCTP procedures */
4398 /*********************************************/
4399 /* C1. update advancedPeerAckPoint */
4400 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4401 asoc->advanced_peer_ack_point = cumack;
4403 /* PR-Sctp issues need to be addressed too */
4404 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4405 struct sctp_tmit_chunk *lchk;
4406 uint32_t old_adv_peer_ack_point;
4408 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4409 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4410 /* C3. See if we need to send a Fwd-TSN */
4411 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4413 * ISSUE with ECN, see FWD-TSN processing.
4415 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4416 send_forward_tsn(stcb, asoc);
4418 /* try to FR fwd-tsn's that get lost too */
4419 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4420 send_forward_tsn(stcb, asoc);
4425 /* Assure a timer is up */
4426 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4427 stcb->sctp_ep, stcb, lchk->whoTo);
4430 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4431 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4433 stcb->asoc.peers_rwnd,
4434 stcb->asoc.total_flight,
4435 stcb->asoc.total_output_queue_size);
4440 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4441 struct sctp_tcb *stcb,
4442 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4443 int *abort_now, uint8_t flags,
4444 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4446 struct sctp_association *asoc;
4447 struct sctp_tmit_chunk *tp1, *tp2;
4448 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4449 uint16_t wake_him = 0;
4450 uint32_t send_s = 0;
4452 int accum_moved = 0;
4453 int will_exit_fast_recovery = 0;
4454 uint32_t a_rwnd, old_rwnd;
4455 int win_probe_recovery = 0;
4456 int win_probe_recovered = 0;
4457 struct sctp_nets *net = NULL;
4460 uint8_t reneged_all = 0;
4461 uint8_t cmt_dac_flag;
4464 * we take any chance we can to service our queues since we cannot
4465 * get awoken when the socket is read from :<
4468 * Now perform the actual SACK handling: 1) Verify that it is not an
4469 * old sack, if so discard. 2) If there is nothing left in the send
4470 * queue (cum-ack is equal to last acked) then you have a duplicate
4471 * too, update any rwnd change and verify no timers are running.
4472 * then return. 3) Process any new consequtive data i.e. cum-ack
4473 * moved process these first and note that it moved. 4) Process any
4474 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4475 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4476 * sync up flightsizes and things, stop all timers and also check
4477 * for shutdown_pending state. If so then go ahead and send off the
4478 * shutdown. If in shutdown recv, send off the shutdown-ack and
4479 * start that timer, Ret. 9) Strike any non-acked things and do FR
4480 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4481 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4482 * if in shutdown_recv state.
4484 SCTP_TCB_LOCK_ASSERT(stcb);
4486 this_sack_lowest_newack = 0;
4487 SCTP_STAT_INCR(sctps_slowpath_sack);
4489 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4490 #ifdef SCTP_ASOCLOG_OF_TSNS
4491 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4492 stcb->asoc.cumack_log_at++;
4493 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4494 stcb->asoc.cumack_log_at = 0;
4499 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4500 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4501 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4504 old_rwnd = stcb->asoc.peers_rwnd;
4505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4506 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4507 stcb->asoc.overall_error_count,
4509 SCTP_FROM_SCTP_INDATA,
4512 stcb->asoc.overall_error_count = 0;
4514 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4515 sctp_log_sack(asoc->last_acked_seq,
4522 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4524 uint32_t *dupdata, dblock;
4526 for (i = 0; i < num_dup; i++) {
4527 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4528 sizeof(uint32_t), (uint8_t *)&dblock);
4529 if (dupdata == NULL) {
4532 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4536 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4537 tp1 = TAILQ_LAST(&asoc->sent_queue,
4538 sctpchunk_listhead);
4539 send_s = tp1->rec.data.tsn + 1;
4542 send_s = asoc->sending_seq;
4544 if (SCTP_TSN_GE(cum_ack, send_s)) {
4545 struct mbuf *op_err;
4546 char msg[SCTP_DIAG_INFO_LEN];
4549 * no way, we have not even sent this TSN out yet. Peer is
4550 * hopelessly messed up with us.
4552 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4555 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4556 tp1->rec.data.tsn, (void *)tp1);
4561 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4563 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4564 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4565 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4568 /**********************/
4569 /* 1) check the range */
4570 /**********************/
4571 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4572 /* acking something behind */
4576 /* update the Rwnd of the peer */
4577 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4578 TAILQ_EMPTY(&asoc->send_queue) &&
4579 (asoc->stream_queue_cnt == 0)) {
4580 /* nothing left on send/sent and strmq */
4581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4582 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4583 asoc->peers_rwnd, 0, 0, a_rwnd);
4585 asoc->peers_rwnd = a_rwnd;
4586 if (asoc->sent_queue_retran_cnt) {
4587 asoc->sent_queue_retran_cnt = 0;
4589 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4590 /* SWS sender side engages */
4591 asoc->peers_rwnd = 0;
4593 /* stop any timers */
4594 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4595 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4596 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4597 net->partial_bytes_acked = 0;
4598 net->flight_size = 0;
4600 asoc->total_flight = 0;
4601 asoc->total_flight_count = 0;
4605 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4606 * things. The total byte count acked is tracked in netAckSz AND
4607 * netAck2 is used to track the total bytes acked that are un-
4608 * amibguious and were never retransmitted. We track these on a per
4609 * destination address basis.
4611 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4613 /* Drag along the window_tsn for cwr's */
4614 net->cwr_window_tsn = cum_ack;
4616 net->prev_cwnd = net->cwnd;
4621 * CMT: Reset CUC and Fast recovery algo variables before
4624 net->new_pseudo_cumack = 0;
4625 net->will_exit_fast_recovery = 0;
4626 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4627 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4631 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4632 * to be greater than the cumack. Also reset saw_newack to 0
4635 net->saw_newack = 0;
4636 net->this_sack_highest_newack = last_tsn;
4638 /* process the new consecutive TSN first */
4639 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4640 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4641 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4643 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4645 * If it is less than ACKED, it is
4646 * now no-longer in flight. Higher
4647 * values may occur during marking
4649 if ((tp1->whoTo->dest_state &
4650 SCTP_ADDR_UNCONFIRMED) &&
4651 (tp1->snd_count < 2)) {
4653 * If there was no retran
4654 * and the address is
4655 * un-confirmed and we sent
4657 * sacked.. its confirmed,
4660 tp1->whoTo->dest_state &=
4661 ~SCTP_ADDR_UNCONFIRMED;
4663 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4664 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4665 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4666 tp1->whoTo->flight_size,
4668 (uint32_t)(uintptr_t)tp1->whoTo,
4671 sctp_flight_size_decrease(tp1);
4672 sctp_total_flight_decrease(stcb, tp1);
4673 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4674 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4678 tp1->whoTo->net_ack += tp1->send_size;
4680 /* CMT SFR and DAC algos */
4681 this_sack_lowest_newack = tp1->rec.data.tsn;
4682 tp1->whoTo->saw_newack = 1;
4684 if (tp1->snd_count < 2) {
4686 * True non-retransmitted
4689 tp1->whoTo->net_ack2 +=
4692 /* update RTO too? */
4696 sctp_calculate_rto(stcb,
4698 &tp1->sent_rcv_time,
4699 SCTP_RTT_FROM_DATA);
4702 if (tp1->whoTo->rto_needed == 0) {
4703 tp1->whoTo->rto_needed = 1;
4709 * CMT: CUCv2 algorithm. From the
4710 * cumack'd TSNs, for each TSN being
4711 * acked for the first time, set the
4712 * following variables for the
4713 * corresp destination.
4714 * new_pseudo_cumack will trigger a
4716 * find_(rtx_)pseudo_cumack will
4717 * trigger search for the next
4718 * expected (rtx-)pseudo-cumack.
4720 tp1->whoTo->new_pseudo_cumack = 1;
4721 tp1->whoTo->find_pseudo_cumack = 1;
4722 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4725 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4726 sctp_log_sack(asoc->last_acked_seq,
4731 SCTP_LOG_TSN_ACKED);
4733 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4734 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4737 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4738 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4739 #ifdef SCTP_AUDITING_ENABLED
4740 sctp_audit_log(0xB3,
4741 (asoc->sent_queue_retran_cnt & 0x000000ff));
4744 if (tp1->rec.data.chunk_was_revoked) {
4745 /* deflate the cwnd */
4746 tp1->whoTo->cwnd -= tp1->book_size;
4747 tp1->rec.data.chunk_was_revoked = 0;
4749 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4750 tp1->sent = SCTP_DATAGRAM_ACKED;
4757 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4758 /* always set this up to cum-ack */
4759 asoc->this_sack_highest_gap = last_tsn;
4761 if ((num_seg > 0) || (num_nr_seg > 0)) {
4764 * thisSackHighestGap will increase while handling NEW
4765 * segments this_sack_highest_newack will increase while
4766 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4767 * used for CMT DAC algo. saw_newack will also change.
4769 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4770 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4771 num_seg, num_nr_seg, &rto_ok)) {
4775 * validate the biggest_tsn_acked in the gap acks if strict
4776 * adherence is wanted.
4778 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4780 * peer is either confused or we are under attack.
4783 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4784 biggest_tsn_acked, send_s);
4788 /*******************************************/
4789 /* cancel ALL T3-send timer if accum moved */
4790 /*******************************************/
4791 if (asoc->sctp_cmt_on_off > 0) {
4792 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4793 if (net->new_pseudo_cumack)
4794 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4796 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4801 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4802 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4803 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4807 /********************************************/
4808 /* drop the acked chunks from the sentqueue */
4809 /********************************************/
4810 asoc->last_acked_seq = cum_ack;
4812 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4813 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4816 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4817 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4818 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4821 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4825 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4826 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4827 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4828 asoc->trigger_reset = 1;
4830 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4831 if (PR_SCTP_ENABLED(tp1->flags)) {
4832 if (asoc->pr_sctp_cnt != 0)
4833 asoc->pr_sctp_cnt--;
4835 asoc->sent_queue_cnt--;
4837 /* sa_ignore NO_NULL_CHK */
4838 sctp_free_bufspace(stcb, asoc, tp1, 1);
4839 sctp_m_freem(tp1->data);
4841 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4842 asoc->sent_queue_cnt_removeable--;
4845 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4846 sctp_log_sack(asoc->last_acked_seq,
4851 SCTP_LOG_FREE_SENT);
4853 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4856 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4858 panic("Warning flight size is positive and should be 0");
4860 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4861 asoc->total_flight);
4863 asoc->total_flight = 0;
4866 /* sa_ignore NO_NULL_CHK */
4867 if ((wake_him) && (stcb->sctp_socket)) {
4868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4872 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4873 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4874 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4876 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4877 so = SCTP_INP_SO(stcb->sctp_ep);
4878 atomic_add_int(&stcb->asoc.refcnt, 1);
4879 SCTP_TCB_UNLOCK(stcb);
4880 SCTP_SOCKET_LOCK(so, 1);
4881 SCTP_TCB_LOCK(stcb);
4882 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4883 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4884 /* assoc was freed while we were unlocked */
4885 SCTP_SOCKET_UNLOCK(so, 1);
4889 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4890 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4891 SCTP_SOCKET_UNLOCK(so, 1);
4894 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4895 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4899 if (asoc->fast_retran_loss_recovery && accum_moved) {
4900 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4901 /* Setup so we will exit RFC2582 fast recovery */
4902 will_exit_fast_recovery = 1;
4906 * Check for revoked fragments:
4908 * if Previous sack - Had no frags then we can't have any revoked if
4909 * Previous sack - Had frag's then - If we now have frags aka
4910 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4911 * some of them. else - The peer revoked all ACKED fragments, since
4912 * we had some before and now we have NONE.
4916 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4917 asoc->saw_sack_with_frags = 1;
4918 } else if (asoc->saw_sack_with_frags) {
4919 int cnt_revoked = 0;
4921 /* Peer revoked all dg's marked or acked */
4922 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4923 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4924 tp1->sent = SCTP_DATAGRAM_SENT;
4925 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4926 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4927 tp1->whoTo->flight_size,
4929 (uint32_t)(uintptr_t)tp1->whoTo,
4932 sctp_flight_size_increase(tp1);
4933 sctp_total_flight_increase(stcb, tp1);
4934 tp1->rec.data.chunk_was_revoked = 1;
4936 * To ensure that this increase in
4937 * flightsize, which is artificial, does not
4938 * throttle the sender, we also increase the
4939 * cwnd artificially.
4941 tp1->whoTo->cwnd += tp1->book_size;
4948 asoc->saw_sack_with_frags = 0;
4951 asoc->saw_sack_with_nr_frags = 1;
4953 asoc->saw_sack_with_nr_frags = 0;
4955 /* JRS - Use the congestion control given in the CC module */
4956 if (ecne_seen == 0) {
4957 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4958 if (net->net_ack2 > 0) {
4960 * Karn's rule applies to clearing error
4961 * count, this is optional.
4963 net->error_count = 0;
4964 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4965 /* addr came good */
4966 net->dest_state |= SCTP_ADDR_REACHABLE;
4967 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4968 0, (void *)net, SCTP_SO_NOT_LOCKED);
4971 if (net == stcb->asoc.primary_destination) {
4972 if (stcb->asoc.alternate) {
4974 * release the alternate,
4977 sctp_free_remote_addr(stcb->asoc.alternate);
4978 stcb->asoc.alternate = NULL;
4982 if (net->dest_state & SCTP_ADDR_PF) {
4983 net->dest_state &= ~SCTP_ADDR_PF;
4984 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4985 stcb->sctp_ep, stcb, net,
4986 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4987 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4988 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4989 /* Done with this net */
4992 /* restore any doubled timers */
4993 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4994 if (net->RTO < stcb->asoc.minrto) {
4995 net->RTO = stcb->asoc.minrto;
4997 if (net->RTO > stcb->asoc.maxrto) {
4998 net->RTO = stcb->asoc.maxrto;
5002 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5005 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5006 /* nothing left in-flight */
5007 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5008 /* stop all timers */
5009 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5011 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5012 net->flight_size = 0;
5013 net->partial_bytes_acked = 0;
5015 asoc->total_flight = 0;
5016 asoc->total_flight_count = 0;
5019 /**********************************/
5020 /* Now what about shutdown issues */
5021 /**********************************/
5022 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5023 /* nothing left on sendqueue.. consider done */
5024 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5025 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5026 asoc->peers_rwnd, 0, 0, a_rwnd);
5028 asoc->peers_rwnd = a_rwnd;
5029 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5030 /* SWS sender side engages */
5031 asoc->peers_rwnd = 0;
5034 if ((asoc->stream_queue_cnt == 1) &&
5035 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5036 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5037 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5038 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5040 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5041 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5042 (asoc->stream_queue_cnt == 1) &&
5043 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5044 struct mbuf *op_err;
5048 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5049 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5050 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5053 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5054 (asoc->stream_queue_cnt == 0)) {
5055 struct sctp_nets *netp;
5057 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5058 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5059 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5061 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5062 sctp_stop_timers_for_shutdown(stcb);
5063 if (asoc->alternate) {
5064 netp = asoc->alternate;
5066 netp = asoc->primary_destination;
5068 sctp_send_shutdown(stcb, netp);
5069 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5070 stcb->sctp_ep, stcb, netp);
5071 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5072 stcb->sctp_ep, stcb, netp);
5074 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5075 (asoc->stream_queue_cnt == 0)) {
5076 struct sctp_nets *netp;
5078 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5079 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5080 sctp_stop_timers_for_shutdown(stcb);
5081 if (asoc->alternate) {
5082 netp = asoc->alternate;
5084 netp = asoc->primary_destination;
5086 sctp_send_shutdown_ack(stcb, netp);
5087 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5088 stcb->sctp_ep, stcb, netp);
5093 * Now here we are going to recycle net_ack for a different use...
5096 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5101 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5102 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5103 * automatically ensure that.
5105 if ((asoc->sctp_cmt_on_off > 0) &&
5106 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5107 (cmt_dac_flag == 0)) {
5108 this_sack_lowest_newack = cum_ack;
5110 if ((num_seg > 0) || (num_nr_seg > 0)) {
5111 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5112 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5114 /* JRS - Use the congestion control given in the CC module */
5115 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5117 /* Now are we exiting loss recovery ? */
5118 if (will_exit_fast_recovery) {
5119 /* Ok, we must exit fast recovery */
5120 asoc->fast_retran_loss_recovery = 0;
5122 if ((asoc->sat_t3_loss_recovery) &&
5123 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5124 /* end satellite t3 loss recovery */
5125 asoc->sat_t3_loss_recovery = 0;
5130 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5131 if (net->will_exit_fast_recovery) {
5132 /* Ok, we must exit fast recovery */
5133 net->fast_retran_loss_recovery = 0;
5137 /* Adjust and set the new rwnd value */
5138 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5139 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5140 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5142 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5143 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5144 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5145 /* SWS sender side engages */
5146 asoc->peers_rwnd = 0;
5148 if (asoc->peers_rwnd > old_rwnd) {
5149 win_probe_recovery = 1;
5153 * Now we must setup so we have a timer up for anyone with
5159 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5160 if (win_probe_recovery && (net->window_probe)) {
5161 win_probe_recovered = 1;
5163 * Find first chunk that was used with
5164 * window probe and clear the event. Put
5165 * it back into the send queue as if has
5168 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5169 if (tp1->window_probe) {
5170 sctp_window_probe_recovery(stcb, asoc, tp1);
5175 if (net->flight_size) {
5177 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5178 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5179 stcb->sctp_ep, stcb, net);
5181 if (net->window_probe) {
5182 net->window_probe = 0;
5185 if (net->window_probe) {
5187 * In window probes we must assure a timer
5188 * is still running there
5190 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5191 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5192 stcb->sctp_ep, stcb, net);
5195 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5196 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5198 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5203 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5204 (asoc->sent_queue_retran_cnt == 0) &&
5205 (win_probe_recovered == 0) &&
5208 * huh, this should not happen unless all packets are
5209 * PR-SCTP and marked to skip of course.
5211 if (sctp_fs_audit(asoc)) {
5212 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5213 net->flight_size = 0;
5215 asoc->total_flight = 0;
5216 asoc->total_flight_count = 0;
5217 asoc->sent_queue_retran_cnt = 0;
5218 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5219 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5220 sctp_flight_size_increase(tp1);
5221 sctp_total_flight_increase(stcb, tp1);
5222 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5223 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5230 /*********************************************/
5231 /* Here we perform PR-SCTP procedures */
5233 /*********************************************/
5234 /* C1. update advancedPeerAckPoint */
5235 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5236 asoc->advanced_peer_ack_point = cum_ack;
5238 /* C2. try to further move advancedPeerAckPoint ahead */
5239 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5240 struct sctp_tmit_chunk *lchk;
5241 uint32_t old_adv_peer_ack_point;
5243 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5244 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5245 /* C3. See if we need to send a Fwd-TSN */
5246 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5248 * ISSUE with ECN, see FWD-TSN processing.
5250 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5251 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5252 0xee, cum_ack, asoc->advanced_peer_ack_point,
5253 old_adv_peer_ack_point);
5255 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5256 send_forward_tsn(stcb, asoc);
5258 /* try to FR fwd-tsn's that get lost too */
5259 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5260 send_forward_tsn(stcb, asoc);
5265 /* Assure a timer is up */
5266 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5267 stcb->sctp_ep, stcb, lchk->whoTo);
5270 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5271 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5273 stcb->asoc.peers_rwnd,
5274 stcb->asoc.total_flight,
5275 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a SHUTDOWN chunk.  A
 * SHUTDOWN carries no gap reports and no window advertisement, so an
 * a_rwnd is synthesized (current peers_rwnd plus everything still in
 * flight) such that the peer's advertised window is effectively left
 * unchanged, and the normal express (no-gap) SACK path does the rest.
 * *abort_flag is passed through so the express handler can report an
 * abort condition back to the caller.
 */
5280 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5283 uint32_t cum_ack, a_rwnd;
/* cumulative TSN arrives in network byte order */
5285 cum_ack = ntohl(cp->cumulative_tsn_ack);
5286 /* Arrange so a_rwnd does NOT change */
5287 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5289 /* Now call the express sack handling */
5290 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * Re-drive delivery on one inbound stream after the delivery point has
 * been moved (visible caller: FORWARD-TSN handling).  Pass 1 delivers
 * every queued message at or before strmin->last_mid_delivered; pass 2
 * then delivers any consecutive run of messages that follows.  Complete
 * (SCTP_DATA_NOT_FRAG) messages go straight to the socket read queue;
 * fragmented ones are handed to sctp_deliver_reasm_check() instead.
 * NOTE(review): this extract is missing some original lines (closing
 * braces/else arms); comments below describe only the visible code.
 */
5294 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5295 struct sctp_stream_in *strmin)
5297 struct sctp_queued_to_read *control, *ncontrol;
5298 struct sctp_association *asoc;
5300 int need_reasm_check = 0;
5303 mid = strmin->last_mid_delivered;
5305 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with mid <= last_mid_delivered is now deliverable. */
5308 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5309 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5310 /* this is deliverable now */
5311 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* unfragmented: unlink from whichever stream queue it sits on */
5312 if (control->on_strm_q) {
5313 if (control->on_strm_q == SCTP_ON_ORDERED) {
5314 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5315 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5316 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5319 panic("strmin: %p ctl: %p unknown %d",
5320 strmin, control, control->on_strm_q);
5323 control->on_strm_q = 0;
5325 /* subtract pending on streams */
/*
 * Counter underflow is clamped to zero; the panic arm appears to be a
 * debug-build path (its enclosing conditionals are not visible here).
 */
5326 if (asoc->size_on_all_streams >= control->length) {
5327 asoc->size_on_all_streams -= control->length;
5330 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5332 asoc->size_on_all_streams = 0;
5335 sctp_ucount_decr(asoc->cnt_on_all_streams);
5336 /* deliver it to at least the delivery-q */
5337 if (stcb->sctp_socket) {
5338 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5339 sctp_add_to_readq(stcb->sctp_ep, stcb,
5341 &stcb->sctp_socket->so_rcv,
5342 1, SCTP_READ_LOCK_HELD,
5343 SCTP_SO_NOT_LOCKED);
5346 /* Its a fragmented message */
5347 if (control->first_frag_seen) {
5349 * Make it so this is next to
5350 * deliver, we restore later
5352 strmin->last_mid_delivered = control->mid - 1;
5353 need_reasm_check = 1;
5358 /* no more delivery now. */
/*
 * A partially reassembled message was made "next to deliver" above:
 * run the reassembly check, then restore the delivery point unless the
 * check already moved it further ahead.
 */
5362 if (need_reasm_check) {
5365 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5366 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5367 /* Restore the next to deliver unless we are ahead */
5368 strmin->last_mid_delivered = mid;
5371 /* Left the front Partial one on */
5374 need_reasm_check = 0;
5377 * now we must deliver things in queue the normal way if any are
/* Pass 2: deliver the consecutive run starting just past the cum point. */
5380 mid = strmin->last_mid_delivered + 1;
5381 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5382 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5383 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5384 /* this is deliverable now */
5385 if (control->on_strm_q) {
5386 if (control->on_strm_q == SCTP_ON_ORDERED) {
5387 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5388 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5389 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5392 panic("strmin: %p ctl: %p unknown %d",
5393 strmin, control, control->on_strm_q);
5396 control->on_strm_q = 0;
5398 /* subtract pending on streams */
5399 if (asoc->size_on_all_streams >= control->length) {
5400 asoc->size_on_all_streams -= control->length;
5403 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5405 asoc->size_on_all_streams = 0;
5408 sctp_ucount_decr(asoc->cnt_on_all_streams);
5409 /* deliver it to at least the delivery-q */
5410 strmin->last_mid_delivered = control->mid;
5411 if (stcb->sctp_socket) {
5412 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5413 sctp_add_to_readq(stcb->sctp_ep, stcb,
5415 &stcb->sctp_socket->so_rcv, 1,
5416 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* advance to the next expected mid and keep scanning */
5419 mid = strmin->last_mid_delivered + 1;
5421 /* Its a fragmented message */
5422 if (control->first_frag_seen) {
5424 * Make it so this is next to
5427 strmin->last_mid_delivered = control->mid - 1;
5428 need_reasm_check = 1;
5436 if (need_reasm_check) {
5437 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * Tear down reassembly state for message `mid` on in-stream `stream`
 * after a FORWARD-TSN told us to skip it.  Queued fragments are freed;
 * for pre-I-DATA unordered traffic the TSN of each fragment is compared
 * against `cumtsn` so that data beyond the forwarded point is not
 * discarded.  If fragments survive the purge, the control block is
 * reset and re-primed from a leading FIRST_FRAG; otherwise it is
 * unlinked from its stream queue and, when not also on the read queue,
 * freed.  NOTE(review): several original lines (braces/early-return
 * arms) are not visible in this extract; comments describe only the
 * visible code.
 */
5444 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5445 struct sctp_association *asoc,
5446 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5448 struct sctp_queued_to_read *control;
5449 struct sctp_stream_in *strm;
5450 struct sctp_tmit_chunk *chk, *nchk;
5451 int cnt_removed = 0;
5454 * For now large messages held on the stream reasm that are complete
5455 * will be tossed too. We could in theory do more work to spin
5456 * through and stop after dumping one msg aka seeing the start of a
5457 * new msg at the head, and call the delivery function... to see if
5458 * it can be delivered... But for now we just dump everything on the
5461 strm = &asoc->strmin[stream];
5462 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
/* no reassembly entry for this message (early-out arm not visible) */
5463 if (control == NULL) {
5467 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
/* Free the queued fragments of this message. */
5470 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5471 /* Purge hanging chunks */
5472 if (!asoc->idata_supported && (ordered == 0)) {
/* legacy unordered data: fragments past the forwarded cum-tsn stay */
5473 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5478 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* keep the reassembly byte counter consistent; clamp on underflow */
5479 if (asoc->size_on_reasm_queue >= chk->send_size) {
5480 asoc->size_on_reasm_queue -= chk->send_size;
5483 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5485 asoc->size_on_reasm_queue = 0;
5488 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5490 sctp_m_freem(chk->data);
5493 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * Fragments survived the purge: reset the control block, re-prime it
 * from a leading FIRST_FRAG if present, and re-run the delivery check.
 */
5495 if (!TAILQ_EMPTY(&control->reasm)) {
5496 /* This has to be old data, unordered */
5497 if (control->data) {
5498 sctp_m_freem(control->data);
5499 control->data = NULL;
5501 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5502 chk = TAILQ_FIRST(&control->reasm);
5503 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5504 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5505 sctp_add_chk_to_control(control, strm, stcb, asoc,
5506 chk, SCTP_READ_LOCK_HELD);
5508 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Otherwise unlink the emptied control from its stream queue. */
5511 if (control->on_strm_q == SCTP_ON_ORDERED) {
5512 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5513 if (asoc->size_on_all_streams >= control->length) {
5514 asoc->size_on_all_streams -= control->length;
5517 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5519 asoc->size_on_all_streams = 0;
5522 sctp_ucount_decr(asoc->cnt_on_all_streams);
5523 control->on_strm_q = 0;
5524 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5525 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5526 control->on_strm_q = 0;
5528 } else if (control->on_strm_q) {
5529 panic("strm: %p ctl: %p unknown %d",
5530 strm, control, control->on_strm_q);
5533 control->on_strm_q = 0;
/* free the control unless the application can still read it */
5534 if (control->on_read_q == 0) {
5535 sctp_free_remote_addr(control->whoFrom);
5536 if (control->data) {
5537 sctp_m_freem(control->data);
5538 control->data = NULL;
5540 sctp_free_a_readq(stcb, control);
5545 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5546 struct sctp_forward_tsn_chunk *fwd,
5547 int *abort_flag, struct mbuf *m, int offset)
5549 /* The pr-sctp fwd tsn */
5551 * here we will perform all the data receiver side steps for
5552 * processing FwdTSN, as required in by pr-sctp draft:
5554 * Assume we get FwdTSN(x):
5556 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5557 * + others we have 3) examine and update re-ordering queue on
5558 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5559 * report where we are.
5561 struct sctp_association *asoc;
5562 uint32_t new_cum_tsn, gap;
5563 unsigned int i, fwd_sz, m_size;
5565 struct sctp_stream_in *strm;
5566 struct sctp_queued_to_read *control, *sv;
5569 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5570 SCTPDBG(SCTP_DEBUG_INDATA1,
5571 "Bad size too small/big fwd-tsn\n");
5574 m_size = (stcb->asoc.mapping_array_size << 3);
5575 /*************************************************************/
5576 /* 1. Here we update local cumTSN and shift the bitmap array */
5577 /*************************************************************/
5578 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5580 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5581 /* Already got there ... */
5585 * now we know the new TSN is more advanced, let's find the actual
5588 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5589 asoc->cumulative_tsn = new_cum_tsn;
5590 if (gap >= m_size) {
5591 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5592 struct mbuf *op_err;
5593 char msg[SCTP_DIAG_INFO_LEN];
5596 * out of range (of single byte chunks in the rwnd I
5597 * give out). This must be an attacker.
5600 snprintf(msg, sizeof(msg),
5601 "New cum ack %8.8x too high, highest TSN %8.8x",
5602 new_cum_tsn, asoc->highest_tsn_inside_map);
5603 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5604 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5605 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5608 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5610 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5611 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5612 asoc->highest_tsn_inside_map = new_cum_tsn;
5614 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5615 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5618 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5621 SCTP_TCB_LOCK_ASSERT(stcb);
5622 for (i = 0; i <= gap; i++) {
5623 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5624 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5625 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5626 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5627 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5632 /*************************************************************/
5633 /* 2. Clear up re-assembly queue */
5634 /*************************************************************/
5636 /* This is now done as part of clearing up the stream/seq */
5637 if (asoc->idata_supported == 0) {
5640 /* Flush all the un-ordered data based on cum-tsn */
5641 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5642 for (sid = 0; sid < asoc->streamincnt; sid++) {
5643 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5645 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5647 /*******************************************************/
5648 /* 3. Update the PR-stream re-ordering queues and fix */
5649 /* delivery issues as needed. */
5650 /*******************************************************/
5651 fwd_sz -= sizeof(*fwd);
5654 unsigned int num_str;
5655 uint32_t mid, cur_mid;
5657 uint16_t ordered, flags;
5658 struct sctp_strseq *stseq, strseqbuf;
5659 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5661 offset += sizeof(*fwd);
5663 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5664 if (asoc->idata_supported) {
5665 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5667 num_str = fwd_sz / sizeof(struct sctp_strseq);
5669 for (i = 0; i < num_str; i++) {
5670 if (asoc->idata_supported) {
5671 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5672 sizeof(struct sctp_strseq_mid),
5673 (uint8_t *)&strseqbuf_m);
5674 offset += sizeof(struct sctp_strseq_mid);
5675 if (stseq_m == NULL) {
5678 sid = ntohs(stseq_m->sid);
5679 mid = ntohl(stseq_m->mid);
5680 flags = ntohs(stseq_m->flags);
5681 if (flags & PR_SCTP_UNORDERED_FLAG) {
5687 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5688 sizeof(struct sctp_strseq),
5689 (uint8_t *)&strseqbuf);
5690 offset += sizeof(struct sctp_strseq);
5691 if (stseq == NULL) {
5694 sid = ntohs(stseq->sid);
5695 mid = (uint32_t)ntohs(stseq->ssn);
5703 * Ok we now look for the stream/seq on the read
5704 * queue where its not all delivered. If we find it
5705 * we transmute the read entry into a PDI_ABORTED.
5707 if (sid >= asoc->streamincnt) {
5708 /* screwed up streams, stop! */
5711 if ((asoc->str_of_pdapi == sid) &&
5712 (asoc->ssn_of_pdapi == mid)) {
5714 * If this is the one we were partially
5715 * delivering now then we no longer are.
5716 * Note this will change with the reassembly
5719 asoc->fragmented_delivery_inprogress = 0;
5721 strm = &asoc->strmin[sid];
5722 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5723 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5725 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5726 if ((control->sinfo_stream == sid) &&
5727 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5728 str_seq = (sid << 16) | (0x0000ffff & mid);
5729 control->pdapi_aborted = 1;
5730 sv = stcb->asoc.control_pdapi;
5731 control->end_added = 1;
5732 if (control->on_strm_q == SCTP_ON_ORDERED) {
5733 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5734 if (asoc->size_on_all_streams >= control->length) {
5735 asoc->size_on_all_streams -= control->length;
5738 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5740 asoc->size_on_all_streams = 0;
5743 sctp_ucount_decr(asoc->cnt_on_all_streams);
5744 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5745 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5747 } else if (control->on_strm_q) {
5748 panic("strm: %p ctl: %p unknown %d",
5749 strm, control, control->on_strm_q);
5752 control->on_strm_q = 0;
5753 stcb->asoc.control_pdapi = control;
5754 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5756 SCTP_PARTIAL_DELIVERY_ABORTED,
5758 SCTP_SO_NOT_LOCKED);
5759 stcb->asoc.control_pdapi = sv;
5761 } else if ((control->sinfo_stream == sid) &&
5762 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5763 /* We are past our victim SSN */
5767 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5768 /* Update the sequence number */
5769 strm->last_mid_delivered = mid;
5771 /* now kick the stream the new way */
5772 /* sa_ignore NO_NULL_CHK */
5773 sctp_kick_prsctp_reorder_queue(stcb, strm);
5775 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5778 * Now slide thing forward.
5780 sctp_slide_mapping_arrays(stcb);