/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send that is and will be sending it .. for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
/*
 * Forward declaration: merges a reassembly chunk's data into a control
 * entry (defined later in this file). Declared here so the earlier
 * service routines can call it.
 */
static int
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
78 /* Calculate what the rwnd would be */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone as put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
90 if (stcb->sctp_socket == NULL) {
94 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->cnt_on_reasm_queue == 0 &&
100 asoc->cnt_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
108 * take out what has NOT been put on socket queue and we yet hold
111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 asoc->cnt_on_reasm_queue * MSIZE));
113 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 asoc->cnt_on_all_streams * MSIZE));
120 /* what is the overhead of all these rwnd's */
121 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
123 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 * even it is 0. SWS engaged
126 if (calc < stcb->asoc.my_rwnd_control_len) {
135 * Build out our readq entry based on the incoming packet.
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139 struct sctp_nets *net,
140 uint32_t tsn, uint32_t ppid,
141 uint32_t context, uint16_t sid,
142 uint32_t mid, uint8_t flags,
145 struct sctp_queued_to_read *read_queue_e = NULL;
147 sctp_alloc_a_readq(stcb, read_queue_e);
148 if (read_queue_e == NULL) {
151 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 read_queue_e->sinfo_stream = sid;
153 read_queue_e->sinfo_flags = (flags << 8);
154 read_queue_e->sinfo_ppid = ppid;
155 read_queue_e->sinfo_context = context;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->mid = mid;
160 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 TAILQ_INIT(&read_queue_e->reasm);
162 read_queue_e->whoFrom = net;
163 atomic_add_int(&net->ref_count, 1);
164 read_queue_e->data = dm;
165 read_queue_e->stcb = stcb;
166 read_queue_e->port_from = stcb->rport;
168 return (read_queue_e);
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
174 struct sctp_extrcvinfo *seinfo;
175 struct sctp_sndrcvinfo *outinfo;
176 struct sctp_rcvinfo *rcvinfo;
177 struct sctp_nxtinfo *nxtinfo;
184 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 /* user does not want any ancillary data */
192 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
195 seinfo = (struct sctp_extrcvinfo *)sinfo;
196 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
199 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
206 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
209 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
215 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
220 SCTP_BUF_LEN(ret) = 0;
222 /* We need a CMSG header followed by the struct */
223 cmh = mtod(ret, struct cmsghdr *);
225 * Make sure that there is no un-initialized padding between the
226 * cmsg header and cmsg data and after the cmsg data.
229 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 cmh->cmsg_level = IPPROTO_SCTP;
231 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 cmh->cmsg_type = SCTP_RCVINFO;
233 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 rcvinfo->rcv_context = sinfo->sinfo_context;
241 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
246 cmh->cmsg_level = IPPROTO_SCTP;
247 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 cmh->cmsg_type = SCTP_NXTINFO;
249 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 nxtinfo->nxt_flags = 0;
252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 nxtinfo->nxt_flags |= SCTP_UNORDERED;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 nxtinfo->nxt_flags |= SCTP_COMPLETE;
261 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
267 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 cmh->cmsg_level = IPPROTO_SCTP;
269 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 cmh->cmsg_type = SCTP_EXTRCV;
273 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 cmh->cmsg_type = SCTP_SNDRCV;
279 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
289 uint32_t gap, i, cumackp1;
291 int in_r = 0, in_nr = 0;
293 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
296 cumackp1 = asoc->cumulative_tsn + 1;
297 if (SCTP_TSN_GT(cumackp1, tsn)) {
299 * this tsn is behind the cum ack and thus we don't need to
300 * worry about it being moved from one to the other.
304 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
305 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
306 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 if ((in_r == 0) && (in_nr == 0)) {
309 panic("Things are really messed up now");
311 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
312 sctp_print_mapping_array(asoc);
316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
318 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
319 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
320 asoc->highest_tsn_inside_nr_map = tsn;
322 if (tsn == asoc->highest_tsn_inside_map) {
323 /* We must back down to see what the new highest is */
324 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
325 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 asoc->highest_tsn_inside_map = i;
333 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
339 sctp_place_control_in_stream(struct sctp_stream_in *strm,
340 struct sctp_association *asoc,
341 struct sctp_queued_to_read *control)
343 struct sctp_queued_to_read *at;
344 struct sctp_readhead *q;
345 uint8_t flags, unordered;
347 flags = (control->sinfo_flags >> 8);
348 unordered = flags & SCTP_DATA_UNORDERED;
350 q = &strm->uno_inqueue;
351 if (asoc->idata_supported == 0) {
352 if (!TAILQ_EMPTY(q)) {
354 * Only one stream can be here in old style
359 TAILQ_INSERT_TAIL(q, control, next_instrm);
360 control->on_strm_q = SCTP_ON_UNORDERED;
366 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
367 control->end_added = 1;
368 control->first_frag_seen = 1;
369 control->last_frag_seen = 1;
371 if (TAILQ_EMPTY(q)) {
373 TAILQ_INSERT_HEAD(q, control, next_instrm);
375 control->on_strm_q = SCTP_ON_UNORDERED;
377 control->on_strm_q = SCTP_ON_ORDERED;
381 TAILQ_FOREACH(at, q, next_instrm) {
382 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
384 * one in queue is bigger than the new one,
385 * insert before this one
387 TAILQ_INSERT_BEFORE(at, control, next_instrm);
389 control->on_strm_q = SCTP_ON_UNORDERED;
391 control->on_strm_q = SCTP_ON_ORDERED;
394 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
396 * Gak, He sent me a duplicate msg id
397 * number?? return -1 to abort.
401 if (TAILQ_NEXT(at, next_instrm) == NULL) {
403 * We are at the end, insert it
406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
407 sctp_log_strm_del(control, at,
408 SCTP_STR_LOG_FROM_INSERT_TL);
410 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
412 control->on_strm_q = SCTP_ON_UNORDERED;
414 control->on_strm_q = SCTP_ON_ORDERED;
425 sctp_abort_in_reasm(struct sctp_tcb *stcb,
426 struct sctp_queued_to_read *control,
427 struct sctp_tmit_chunk *chk,
428 int *abort_flag, int opspot)
430 char msg[SCTP_DIAG_INFO_LEN];
433 if (stcb->asoc.idata_supported) {
434 snprintf(msg, sizeof(msg),
435 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
437 control->fsn_included,
440 chk->rec.data.fsn, chk->rec.data.mid);
442 snprintf(msg, sizeof(msg),
443 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
445 control->fsn_included,
449 (uint16_t)chk->rec.data.mid);
451 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
452 sctp_m_freem(chk->data);
454 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
455 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
456 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
464 * The control could not be placed and must be cleaned.
466 struct sctp_tmit_chunk *chk, *nchk;
468 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
469 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
471 sctp_m_freem(chk->data);
473 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
475 sctp_free_a_readq(stcb, control);
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the control's entered are non-fragmented.
 */
/*
 * sctp_queue_data_to_stream: deliver an ordered message straight to the
 * socket receive buffer when it is the next expected MID (and drain any
 * now-in-order successors), otherwise place it into the stream's sorted
 * in-queue via sctp_place_control_in_stream(); a MID behind the last
 * delivered one, or a duplicate, aborts the association.
 *
 * NOTE(review): this block appears to have lines dropped by the
 * extraction (missing return type, braces, `#else`/`#endif` arms of the
 * __APPLE__ socket-lock dance, and several local declarations such as
 * `op_err` and `nxt_todel`), and stray original line numbers are fused
 * into each line. Left byte-identical here; restore from the upstream
 * FreeBSD sctp_indata.c rather than hand-patching.
 */
485 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
486 struct sctp_association *asoc,
487 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
490 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
491 * all the data in one stream this could happen quite rapidly. One
492 * could use the TSN to keep track of things, but this scheme breaks
493 * down in the other type of stream usage that could occur. Send a
494 * single msg to stream 0, send 4Billion messages to stream 1, now
495 * send a message to stream 0. You have a situation where the TSN
496 * has wrapped but not in the stream. Is this worth worrying about
497 * or should we just change our queue sort at the bottom to be by
500 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
501 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
502 * assignment this could happen... and I don't see how this would be
503 * a violation. So for now I am undecided an will leave the sort by
504 * SSN alone. Maybe a hybred approach is the answer
507 struct sctp_queued_to_read *at;
511 struct sctp_stream_in *strm;
512 char msg[SCTP_DIAG_INFO_LEN];
514 strm = &asoc->strmin[control->sinfo_stream];
515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
516 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
518 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
519 /* The incoming sseq is behind where we last delivered? */
520 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
521 strm->last_mid_delivered, control->mid);
523 * throw it in the stream so it gets cleaned up in
524 * association destruction
526 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
527 if (asoc->idata_supported) {
528 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
529 strm->last_mid_delivered, control->sinfo_tsn,
530 control->sinfo_stream, control->mid);
532 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
533 (uint16_t)strm->last_mid_delivered,
535 control->sinfo_stream,
536 (uint16_t)control->mid);
538 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
539 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
540 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
546 asoc->size_on_all_streams += control->length;
547 sctp_ucount_incr(asoc->cnt_on_all_streams);
548 nxt_todel = strm->last_mid_delivered + 1;
549 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
550 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
553 so = SCTP_INP_SO(stcb->sctp_ep);
554 atomic_add_int(&stcb->asoc.refcnt, 1);
555 SCTP_TCB_UNLOCK(stcb);
556 SCTP_SOCKET_LOCK(so, 1);
558 atomic_subtract_int(&stcb->asoc.refcnt, 1);
559 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
560 SCTP_SOCKET_UNLOCK(so, 1);
564 /* can be delivered right away? */
565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
566 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
568 /* EY it wont be queued if it could be delivered directly */
570 if (asoc->size_on_all_streams >= control->length) {
571 asoc->size_on_all_streams -= control->length;
574 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
576 asoc->size_on_all_streams = 0;
579 sctp_ucount_decr(asoc->cnt_on_all_streams);
580 strm->last_mid_delivered++;
581 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
582 sctp_add_to_readq(stcb->sctp_ep, stcb,
584 &stcb->sctp_socket->so_rcv, 1,
585 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
586 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
588 nxt_todel = strm->last_mid_delivered + 1;
589 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
590 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
591 if (control->on_strm_q == SCTP_ON_ORDERED) {
592 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
593 if (asoc->size_on_all_streams >= control->length) {
594 asoc->size_on_all_streams -= control->length;
597 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
599 asoc->size_on_all_streams = 0;
602 sctp_ucount_decr(asoc->cnt_on_all_streams);
605 panic("Huh control: %p is on_strm_q: %d",
606 control, control->on_strm_q);
609 control->on_strm_q = 0;
610 strm->last_mid_delivered++;
612 * We ignore the return of deliver_data here
613 * since we always can hold the chunk on the
614 * d-queue. And we have a finite number that
615 * can be delivered from the strq.
617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
618 sctp_log_strm_del(control, NULL,
619 SCTP_STR_LOG_FROM_IMMED_DEL);
621 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
622 sctp_add_to_readq(stcb->sctp_ep, stcb,
624 &stcb->sctp_socket->so_rcv, 1,
625 SCTP_READ_LOCK_NOT_HELD,
628 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
633 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
634 SCTP_SOCKET_UNLOCK(so, 1);
639 * Ok, we did not deliver this guy, find the correct place
640 * to put it on the queue.
642 if (sctp_place_control_in_stream(strm, asoc, control)) {
643 snprintf(msg, sizeof(msg),
644 "Queue to str MID: %u duplicate",
646 sctp_clean_up_control(stcb, control);
647 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
648 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
649 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
657 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
659 struct mbuf *m, *prev = NULL;
660 struct sctp_tcb *stcb;
662 stcb = control->stcb;
663 control->held_length = 0;
667 if (SCTP_BUF_LEN(m) == 0) {
668 /* Skip mbufs with NO length */
671 control->data = sctp_m_free(m);
674 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
675 m = SCTP_BUF_NEXT(prev);
678 control->tail_mbuf = prev;
683 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
684 if (control->on_read_q) {
686 * On read queue so we must increment the SB stuff,
687 * we assume caller has done any locks of SB.
689 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
691 m = SCTP_BUF_NEXT(m);
694 control->tail_mbuf = prev;
699 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
701 struct mbuf *prev = NULL;
702 struct sctp_tcb *stcb;
704 stcb = control->stcb;
707 panic("Control broken");
712 if (control->tail_mbuf == NULL) {
715 sctp_setup_tail_pointer(control);
718 control->tail_mbuf->m_next = m;
720 if (SCTP_BUF_LEN(m) == 0) {
721 /* Skip mbufs with NO length */
724 control->tail_mbuf->m_next = sctp_m_free(m);
725 m = control->tail_mbuf->m_next;
727 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
728 m = SCTP_BUF_NEXT(prev);
731 control->tail_mbuf = prev;
736 if (control->on_read_q) {
738 * On read queue so we must increment the SB stuff,
739 * we assume caller has done any locks of SB.
741 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
743 *added += SCTP_BUF_LEN(m);
744 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
745 m = SCTP_BUF_NEXT(m);
748 control->tail_mbuf = prev;
753 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
755 memset(nc, 0, sizeof(struct sctp_queued_to_read));
756 nc->sinfo_stream = control->sinfo_stream;
757 nc->mid = control->mid;
758 TAILQ_INIT(&nc->reasm);
759 nc->top_fsn = control->top_fsn;
760 nc->mid = control->mid;
761 nc->sinfo_flags = control->sinfo_flags;
762 nc->sinfo_ppid = control->sinfo_ppid;
763 nc->sinfo_context = control->sinfo_context;
764 nc->fsn_included = 0xffffffff;
765 nc->sinfo_tsn = control->sinfo_tsn;
766 nc->sinfo_cumtsn = control->sinfo_cumtsn;
767 nc->sinfo_assoc_id = control->sinfo_assoc_id;
768 nc->whoFrom = control->whoFrom;
769 atomic_add_int(&nc->whoFrom->ref_count, 1);
770 nc->stcb = control->stcb;
771 nc->port_from = control->port_from;
775 sctp_reset_a_control(struct sctp_queued_to_read *control,
776 struct sctp_inpcb *inp, uint32_t tsn)
778 control->fsn_included = tsn;
779 if (control->on_read_q) {
781 * We have to purge it from there, hopefully this will work
784 TAILQ_REMOVE(&inp->read_queue, control, next);
785 control->on_read_q = 0;
/*
 * sctp_handle_old_unordered_data: service the (pre-I-DATA) unordered
 * reassembly queue of one control entry — fold in any chunks whose FSN
 * is next in sequence, spill leftover chunks of a *following* message
 * onto a freshly built control (nc), push completed or
 * past-the-pd-point data onto the read queue, and wake the reader.
 *
 * NOTE(review): the extraction dropped many lines from this function
 * (return type, locals such as `fsn`/`cnt_added`, loop/brace structure,
 * several `return` statements) and fused original line numbers into the
 * text. Left byte-identical; restore from upstream FreeBSD
 * sctp_indata.c rather than hand-patching.
 */
790 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
791 struct sctp_association *asoc,
792 struct sctp_stream_in *strm,
793 struct sctp_queued_to_read *control,
795 int inp_read_lock_held)
798 * Special handling for the old un-ordered data chunk. All the
799 * chunks/TSN's go to mid 0. So we have to do the old style watching
800 * to see if we have it all. If you return one, no other control
801 * entries on the un-ordered queue will be looked at. In theory
802 * there should be no others entries in reality, unless the guy is
803 * sending both unordered NDATA and unordered DATA...
805 struct sctp_tmit_chunk *chk, *lchk, *tchk;
807 struct sctp_queued_to_read *nc;
810 if (control->first_frag_seen == 0) {
811 /* Nothing we can do, we have not seen the first piece yet */
814 /* Collapse any we can */
817 fsn = control->fsn_included + 1;
818 /* Now what can we add? */
819 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
820 if (chk->rec.data.fsn == fsn) {
822 sctp_alloc_a_readq(stcb, nc);
826 memset(nc, 0, sizeof(struct sctp_queued_to_read));
827 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
828 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
832 if (control->end_added) {
834 if (!TAILQ_EMPTY(&control->reasm)) {
836 * Ok we have to move anything left
837 * on the control queue to a new
840 sctp_build_readq_entry_from_ctl(nc, control);
841 tchk = TAILQ_FIRST(&control->reasm);
842 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
843 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
844 if (asoc->size_on_reasm_queue >= tchk->send_size) {
845 asoc->size_on_reasm_queue -= tchk->send_size;
848 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
850 asoc->size_on_reasm_queue = 0;
853 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
854 nc->first_frag_seen = 1;
855 nc->fsn_included = tchk->rec.data.fsn;
856 nc->data = tchk->data;
857 nc->sinfo_ppid = tchk->rec.data.ppid;
858 nc->sinfo_tsn = tchk->rec.data.tsn;
859 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
861 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
862 sctp_setup_tail_pointer(nc);
863 tchk = TAILQ_FIRST(&control->reasm);
865 /* Spin the rest onto the queue */
867 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
868 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
869 tchk = TAILQ_FIRST(&control->reasm);
872 * Now lets add it to the queue
873 * after removing control
875 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
876 nc->on_strm_q = SCTP_ON_UNORDERED;
877 if (control->on_strm_q) {
878 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
879 control->on_strm_q = 0;
882 if (control->pdapi_started) {
883 strm->pd_api_started = 0;
884 control->pdapi_started = 0;
886 if (control->on_strm_q) {
887 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
888 control->on_strm_q = 0;
889 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
891 if (control->on_read_q == 0) {
892 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
893 &stcb->sctp_socket->so_rcv, control->end_added,
894 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
896 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
897 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
899 * Switch to the new guy and
905 if (nc->on_strm_q == 0) {
906 sctp_free_a_readq(stcb, nc);
911 sctp_free_a_readq(stcb, nc);
918 if (cnt_added && strm->pd_api_started) {
919 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
921 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
922 strm->pd_api_started = 1;
923 control->pdapi_started = 1;
924 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
925 &stcb->sctp_socket->so_rcv, control->end_added,
926 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
927 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * sctp_inject_old_unordered_data: place a newly arrived (pre-I-DATA)
 * unordered fragment into a control's sorted reassembly queue. A
 * FIRST_FRAG either seeds the control, is deferred (new message beyond
 * the current one), or swaps payloads with the previously seen first
 * when its FSN is smaller; duplicate FSNs abort the association.
 *
 * NOTE(review): the extraction dropped lines from this function
 * (return type, locals such as `inserted`/`tdata`/`tmp`, `else`
 * arms, `break`/`return` statements) and fused original line numbers
 * into the text. Left byte-identical; restore from upstream FreeBSD
 * sctp_indata.c rather than hand-patching.
 */
935 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
936 struct sctp_association *asoc,
937 struct sctp_queued_to_read *control,
938 struct sctp_tmit_chunk *chk,
941 struct sctp_tmit_chunk *at;
945 * Here we need to place the chunk into the control structure sorted
946 * in the correct order.
948 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
949 /* Its the very first one. */
950 SCTPDBG(SCTP_DEBUG_XXX,
951 "chunk is a first fsn: %u becomes fsn_included\n",
953 at = TAILQ_FIRST(&control->reasm);
954 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
956 * The first chunk in the reassembly is a smaller
957 * TSN than this one, even though this has a first,
958 * it must be from a subsequent msg.
962 if (control->first_frag_seen) {
964 * In old un-ordered we can reassembly on one
965 * control multiple messages. As long as the next
966 * FIRST is greater then the old first (TSN i.e. FSN
972 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
974 * Easy way the start of a new guy beyond
979 if ((chk->rec.data.fsn == control->fsn_included) ||
980 (control->pdapi_started)) {
982 * Ok this should not happen, if it does we
983 * started the pd-api on the higher TSN
984 * (since the equals part is a TSN failure
987 * We are completly hosed in that case since
988 * I have no way to recover. This really
989 * will only happen if we can get more TSN's
990 * higher before the pd-api-point.
992 sctp_abort_in_reasm(stcb, control, chk,
994 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
999 * Ok we have two firsts and the one we just got is
1000 * smaller than the one we previously placed.. yuck!
1001 * We must swap them out.
1003 /* swap the mbufs */
1004 tdata = control->data;
1005 control->data = chk->data;
1007 /* Save the lengths */
1008 chk->send_size = control->length;
1009 /* Recompute length of control and tail pointer */
1010 sctp_setup_tail_pointer(control);
1011 /* Fix the FSN included */
1012 tmp = control->fsn_included;
1013 control->fsn_included = chk->rec.data.fsn;
1014 chk->rec.data.fsn = tmp;
1015 /* Fix the TSN included */
1016 tmp = control->sinfo_tsn;
1017 control->sinfo_tsn = chk->rec.data.tsn;
1018 chk->rec.data.tsn = tmp;
1019 /* Fix the PPID included */
1020 tmp = control->sinfo_ppid;
1021 control->sinfo_ppid = chk->rec.data.ppid;
1022 chk->rec.data.ppid = tmp;
1023 /* Fix tail pointer */
1026 control->first_frag_seen = 1;
1027 control->fsn_included = chk->rec.data.fsn;
1028 control->top_fsn = chk->rec.data.fsn;
1029 control->sinfo_tsn = chk->rec.data.tsn;
1030 control->sinfo_ppid = chk->rec.data.ppid;
1031 control->data = chk->data;
1032 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1034 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1035 sctp_setup_tail_pointer(control);
1040 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1041 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1043 * This one in queue is bigger than the new one,
1044 * insert the new one before at.
1046 asoc->size_on_reasm_queue += chk->send_size;
1047 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1049 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1051 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1053 * They sent a duplicate fsn number. This really
1054 * should not happen since the FSN is a TSN and it
1055 * should have been dropped earlier.
1057 sctp_abort_in_reasm(stcb, control, chk,
1059 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1064 if (inserted == 0) {
1065 /* Its at the end */
1066 asoc->size_on_reasm_queue += chk->send_size;
1067 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1068 control->top_fsn = chk->rec.data.fsn;
1069 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1074 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1075 struct sctp_stream_in *strm, int inp_read_lock_held)
1078 * Given a stream, strm, see if any of the SSN's on it that are
1079 * fragmented are ready to deliver. If so go ahead and place them on
1080 * the read queue. In so placing if we have hit the end, then we
1081 * need to remove them from the stream's queue.
1083 struct sctp_queued_to_read *control, *nctl = NULL;
1084 uint32_t next_to_del;
1088 if (stcb->sctp_socket) {
1089 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1090 stcb->sctp_ep->partial_delivery_point);
1092 pd_point = stcb->sctp_ep->partial_delivery_point;
1094 control = TAILQ_FIRST(&strm->uno_inqueue);
1096 if ((control != NULL) &&
1097 (asoc->idata_supported == 0)) {
1098 /* Special handling needed for "old" data format */
1099 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1103 if (strm->pd_api_started) {
1104 /* Can't add more */
1108 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1109 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1110 nctl = TAILQ_NEXT(control, next_instrm);
1111 if (control->end_added) {
1112 /* We just put the last bit on */
1113 if (control->on_strm_q) {
1115 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1116 panic("Huh control: %p on_q: %d -- not unordered?",
1117 control, control->on_strm_q);
1120 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1121 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1122 control->on_strm_q = 0;
1124 if (control->on_read_q == 0) {
1125 sctp_add_to_readq(stcb->sctp_ep, stcb,
1127 &stcb->sctp_socket->so_rcv, control->end_added,
1128 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1131 /* Can we do a PD-API for this un-ordered guy? */
1132 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1133 strm->pd_api_started = 1;
1134 control->pdapi_started = 1;
1135 sctp_add_to_readq(stcb->sctp_ep, stcb,
1137 &stcb->sctp_socket->so_rcv, control->end_added,
1138 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1146 control = TAILQ_FIRST(&strm->inqueue);
1147 if (strm->pd_api_started) {
1148 /* Can't add more */
1151 if (control == NULL) {
1154 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1156 * Ok the guy at the top was being partially delivered
1157 * completed, so we remove it. Note the pd_api flag was
1158 * taken off when the chunk was merged on in
1159 * sctp_queue_data_for_reasm below.
1161 nctl = TAILQ_NEXT(control, next_instrm);
1162 SCTPDBG(SCTP_DEBUG_XXX,
1163 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1164 control, control->end_added, control->mid,
1165 control->top_fsn, control->fsn_included,
1166 strm->last_mid_delivered);
1167 if (control->end_added) {
1168 if (control->on_strm_q) {
1170 if (control->on_strm_q != SCTP_ON_ORDERED) {
1171 panic("Huh control: %p on_q: %d -- not ordered?",
1172 control, control->on_strm_q);
1175 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1176 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1177 if (asoc->size_on_all_streams >= control->length) {
1178 asoc->size_on_all_streams -= control->length;
1181 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1183 asoc->size_on_all_streams = 0;
1186 sctp_ucount_decr(asoc->cnt_on_all_streams);
1187 control->on_strm_q = 0;
1189 if (strm->pd_api_started && control->pdapi_started) {
1190 control->pdapi_started = 0;
1191 strm->pd_api_started = 0;
1193 if (control->on_read_q == 0) {
1194 sctp_add_to_readq(stcb->sctp_ep, stcb,
1196 &stcb->sctp_socket->so_rcv, control->end_added,
1197 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1202 if (strm->pd_api_started) {
1204 * Can't add more must have gotten an un-ordered above being
1205 * partially delivered.
1210 next_to_del = strm->last_mid_delivered + 1;
1212 SCTPDBG(SCTP_DEBUG_XXX,
1213 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1214 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1216 nctl = TAILQ_NEXT(control, next_instrm);
1217 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1218 (control->first_frag_seen)) {
1221 /* Ok we can deliver it onto the stream. */
1222 if (control->end_added) {
1223 /* We are done with it afterwards */
1224 if (control->on_strm_q) {
1226 if (control->on_strm_q != SCTP_ON_ORDERED) {
1227 panic("Huh control: %p on_q: %d -- not ordered?",
1228 control, control->on_strm_q);
1231 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1232 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1233 if (asoc->size_on_all_streams >= control->length) {
1234 asoc->size_on_all_streams -= control->length;
1237 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1239 asoc->size_on_all_streams = 0;
1242 sctp_ucount_decr(asoc->cnt_on_all_streams);
1243 control->on_strm_q = 0;
1247 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1249 * A singleton now slipping through - mark
1250 * it non-revokable too
1252 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1253 } else if (control->end_added == 0) {
1255 * Check if we can defer adding until its
1258 if ((control->length < pd_point) || (strm->pd_api_started)) {
1260 * Don't need it or cannot add more
1261 * (one being delivered that way)
1266 done = (control->end_added) && (control->last_frag_seen);
1267 if (control->on_read_q == 0) {
1269 if (asoc->size_on_all_streams >= control->length) {
1270 asoc->size_on_all_streams -= control->length;
1273 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1275 asoc->size_on_all_streams = 0;
1278 strm->pd_api_started = 1;
1279 control->pdapi_started = 1;
1281 sctp_add_to_readq(stcb->sctp_ep, stcb,
1283 &stcb->sctp_socket->so_rcv, control->end_added,
1284 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1286 strm->last_mid_delivered = next_to_del;
/*
 * Merge the data carried by chunk 'chk' onto the partially reassembled
 * message 'control' and free the chunk's resources.
 *
 * NOTE(review): this extract is missing interleaved source lines (braces,
 * else-branches, comment delimiters); comments below describe only what the
 * visible statements show.
 */
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
 * Given a control and a chunk, merge the data from the chk onto the
 * control and free up the chunk resources.
	/*
	 * If the control is already visible to the reader (on_read_q) and the
	 * caller does not hold the INP read lock, take it so the merge cannot
	 * race with a partial-delivery (PD-API) reader.
	 */
	if (control->on_read_q && (hold_rlock == 0)) {
 * Its being pd-api'd so we must do some locks.
	SCTP_INP_READ_LOCK(stcb->sctp_ep);
	/* First data for this message: chk->data becomes the mbuf head. */
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
		sctp_add_to_tail_pointer(control, chk->data, &added);
	/* Record the highest in-sequence fragment merged so far. */
	control->fsn_included = chk->rec.data.fsn;
	/* The chunk leaves the reassembly queue: fix the accounting. */
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		/* The first fragment supplies TSN/PPID for the whole message. */
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/*
		 * Last fragment arrived. If the message is both on a stream
		 * queue and already handed to the reader, finish any PD-API
		 * in progress and unlink it from its stream queue.
		 */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
 * Don't need to decrement
 * size_on_all_streams, since control is on
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			/* Any other on_strm_q value is a state corruption. */
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
		/* Mark the message complete for the delivery path. */
		control->end_added = 1;
		control->last_frag_seen = 1;
	SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can. If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
/*
 * NOTE(review): this extract is missing interleaved source lines (braces,
 * else-branches, early returns); comments describe only the visible code.
 */
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	strm = &asoc->strmin[control->sinfo_stream];
 * For old un-ordered data chunks.
	/* Upper byte of sinfo_flags carries the chunk receive flags. */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (unordered == 0) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		/* A non-zero return presumably means the SSN/MID already
		 * exists on the stream queue -- TODO confirm. */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
		/*
		 * For old DATA: if this TSN is the very next expected one,
		 * the chunk must carry the B (first-fragment) bit.
		 */
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
 * Ok we created this control and now lets validate
 * that its legal i.e. there is a B bit set, if not
 * and we have up to the cum-ack then its invalid.
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
	/* Old (pre I-DATA) unordered fragments take a special path. */
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
 * Ok we must queue the chunk into the reasembly portion: o if its
 * the first it goes to the control mbuf. o if its not first but the
 * next in sequence it goes to the control, and each succeeding one
 * in order also goes. o if its not in order we place it on the list
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		/* Two FIRST fragments for the same message is a sender bug. */
		if (control->first_frag_seen) {
 * Error on senders part, they either sent us two
 * data chunks with FIRST, or they sent two
 * un-ordered chunks that were fragmented at the
 * same time in the same stream.
			sctp_abort_in_reasm(stcb, control, chk,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
		/* Adopt the first fragment's metadata and data directly. */
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
		/* Place the chunk in our list */
		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				control->top_fsn = chk->rec.data.fsn;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				control->last_frag_seen = 1;
				if (asoc->idata_supported || control->first_frag_seen) {
 * For IDATA we always check since we know
 * that the first fragment is 0. For old
 * DATA we have to receive the first before
 * we know the first FSN (which is the TSN).
					/* An FSN at or below what we already
					 * delivered means a duplicate/overlap. */
					if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
 * We have already delivered up to
						sctp_abort_in_reasm(stcb, control, chk,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
			/* Last fragment was already recorded: a second LAST
			 * fragment is a protocol violation. */
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
			if (asoc->idata_supported || control->first_frag_seen) {
 * For IDATA we always check since we know
 * that the first fragment is 0. For old
 * DATA we have to receive the first before
 * we know the first FSN (which is the TSN).
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
 * We have already delivered up to
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
 * validate not beyond top FSN if we have seen last
			/* Once LAST is seen, no fragment may exceed top_fsn. */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				sctp_abort_in_reasm(stcb, control, chk,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
 * If we reach here, we need to place the new chunk in the
 * reassembly for this control.
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		/* Insert into the reassembly list, kept sorted by FSN. */
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
 * This one in queue is bigger than the new
 * one, insert the new one before at.
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
 * Gak, He sent me a duplicate str seq
 * foo bar, I guess I will just free this
 * new guy, should we abort too? FIX ME
 * MAYBE? Or it COULD be that the SSN's have
 * wrapped. Maybe I should compare to TSN
 * somehow... sigh for now just blow away
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				sctp_abort_in_reasm(stcb, control,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
 * Ok lets see if we can suck any up into the control structure that
 * are in seq if it makes sense.
 * If the first fragment has not been seen there is no sense in
	if (control->first_frag_seen) {
		/* Pull every now-contiguous fragment off the reassembly list
		 * and merge it onto the control. */
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
 * We only add to the
 * size-on-all-streams if its not on
 * the read q. The read q flag will
 * cause a sballoc so its accounted
					asoc->size_on_all_streams += lenadded;
				/* Message completed while a PD-API was active:
				 * close out the partial-delivery state. */
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	/* Need to wakeup the reader */
	sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Look up the reassembly entry for message id 'mid' on stream 'strm'.
 * NOTE(review): lines are missing from this extract (including the guard
 * separating the ordered and unordered searches and the returns); comments
 * reflect only the visible statements.
 */
static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
	struct sctp_queued_to_read *control;

	/* Search the ordered in-queue for a matching message id. */
	TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
		if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
	/* With I-DATA support the unordered queue is searched by MID too. */
	if (idata_supported) {
		TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
	/* Without I-DATA the head of the unordered queue is taken --
	 * presumably because old DATA unordered fragments carry no usable
	 * MID; TODO confirm against callers. */
	control = TAILQ_FIRST(&strm->uno_inqueue);
1685 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1686 struct mbuf **m, int offset, int chk_length,
1687 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1688 int *break_flag, int last_chunk, uint8_t chk_type)
1690 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1691 uint32_t tsn, fsn, gap, mid;
1694 int need_reasm_check = 0;
1696 struct mbuf *op_err;
1697 char msg[SCTP_DIAG_INFO_LEN];
1698 struct sctp_queued_to_read *control, *ncontrol;
1701 struct sctp_stream_reset_list *liste;
1704 int created_control = 0;
1706 if (chk_type == SCTP_IDATA) {
1707 struct sctp_idata_chunk *chunk, chunk_buf;
1709 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1710 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1711 chk_flags = chunk->ch.chunk_flags;
1712 clen = sizeof(struct sctp_idata_chunk);
1713 tsn = ntohl(chunk->dp.tsn);
1714 sid = ntohs(chunk->dp.sid);
1715 mid = ntohl(chunk->dp.mid);
1716 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1718 ppid = chunk->dp.ppid_fsn.ppid;
1720 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1721 ppid = 0xffffffff; /* Use as an invalid value. */
1724 struct sctp_data_chunk *chunk, chunk_buf;
1726 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1727 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1728 chk_flags = chunk->ch.chunk_flags;
1729 clen = sizeof(struct sctp_data_chunk);
1730 tsn = ntohl(chunk->dp.tsn);
1731 sid = ntohs(chunk->dp.sid);
1732 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1734 ppid = chunk->dp.ppid;
1736 if ((size_t)chk_length == clen) {
1738 * Need to send an abort since we had a empty data chunk.
1740 op_err = sctp_generate_no_user_data_cause(tsn);
1741 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1742 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1746 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1747 asoc->send_sack = 1;
1749 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1750 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1751 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1756 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1757 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1758 /* It is a duplicate */
1759 SCTP_STAT_INCR(sctps_recvdupdata);
1760 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1761 /* Record a dup for the next outbound sack */
1762 asoc->dup_tsns[asoc->numduptsns] = tsn;
1765 asoc->send_sack = 1;
1768 /* Calculate the number of TSN's between the base and this TSN */
1769 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1770 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1771 /* Can't hold the bit in the mapping at max array, toss it */
1774 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1775 SCTP_TCB_LOCK_ASSERT(stcb);
1776 if (sctp_expand_mapping_array(asoc, gap)) {
1777 /* Can't expand, drop it */
1781 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1784 /* See if we have received this one already */
1785 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1786 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1787 SCTP_STAT_INCR(sctps_recvdupdata);
1788 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1789 /* Record a dup for the next outbound sack */
1790 asoc->dup_tsns[asoc->numduptsns] = tsn;
1793 asoc->send_sack = 1;
1797 * Check to see about the GONE flag, duplicates would cause a sack
1798 * to be sent up above
1800 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1801 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1802 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1804 * wait a minute, this guy is gone, there is no longer a
1805 * receiver. Send peer an ABORT!
1807 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1808 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1813 * Now before going further we see if there is room. If NOT then we
1814 * MAY let one through only IF this TSN is the one we are waiting
1815 * for on a partial delivery API.
1818 /* Is the stream valid? */
1819 if (sid >= asoc->streamincnt) {
1820 struct sctp_error_invalid_stream *cause;
1822 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1823 0, M_NOWAIT, 1, MT_DATA);
1824 if (op_err != NULL) {
1825 /* add some space up front so prepend will work well */
1826 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1827 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1829 * Error causes are just param's and this one has
1830 * two back to back phdr, one with the error type
1831 * and size, the other with the streamid and a rsvd
1833 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1834 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1835 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1836 cause->stream_id = htons(sid);
1837 cause->reserved = htons(0);
1838 sctp_queue_op_err(stcb, op_err);
1840 SCTP_STAT_INCR(sctps_badsid);
1841 SCTP_TCB_LOCK_ASSERT(stcb);
1842 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1843 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1844 asoc->highest_tsn_inside_nr_map = tsn;
1846 if (tsn == (asoc->cumulative_tsn + 1)) {
1847 /* Update cum-ack */
1848 asoc->cumulative_tsn = tsn;
1853 * If its a fragmented message, lets see if we can find the control
1854 * on the reassembly queues.
1856 if ((chk_type == SCTP_IDATA) &&
1857 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1860 * The first *must* be fsn 0, and other (middle/end) pieces
1861 * can *not* be fsn 0. XXX: This can happen in case of a
1862 * wrap around. Ignore is for now.
1864 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1868 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1869 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1870 chk_flags, control);
1871 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1872 /* See if we can find the re-assembly entity */
1873 if (control != NULL) {
1874 /* We found something, does it belong? */
1875 if (ordered && (mid != control->mid)) {
1876 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1878 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1879 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1880 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1884 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1886 * We can't have a switched order with an
1889 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1893 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1895 * We can't have a switched unordered with a
1898 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1905 * Its a complete segment. Lets validate we don't have a
1906 * re-assembly going on with the same Stream/Seq (for
1907 * ordered) or in the same Stream for unordered.
1909 if (control != NULL) {
1910 if (ordered || asoc->idata_supported) {
1911 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1913 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1916 if ((tsn == control->fsn_included + 1) &&
1917 (control->end_added == 0)) {
1918 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1926 /* now do the tests */
1927 if (((asoc->cnt_on_all_streams +
1928 asoc->cnt_on_reasm_queue +
1929 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1930 (((int)asoc->my_rwnd) <= 0)) {
1932 * When we have NO room in the rwnd we check to make sure
1933 * the reader is doing its job...
1935 if (stcb->sctp_socket->so_rcv.sb_cc) {
1936 /* some to read, wake-up */
1937 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1940 so = SCTP_INP_SO(stcb->sctp_ep);
1941 atomic_add_int(&stcb->asoc.refcnt, 1);
1942 SCTP_TCB_UNLOCK(stcb);
1943 SCTP_SOCKET_LOCK(so, 1);
1944 SCTP_TCB_LOCK(stcb);
1945 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1946 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1947 /* assoc was freed while we were unlocked */
1948 SCTP_SOCKET_UNLOCK(so, 1);
1952 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1953 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1954 SCTP_SOCKET_UNLOCK(so, 1);
1957 /* now is it in the mapping array of what we have accepted? */
1958 if (chk_type == SCTP_DATA) {
1959 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1960 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1961 /* Nope not in the valid range dump it */
1963 sctp_set_rwnd(stcb, asoc);
1964 if ((asoc->cnt_on_all_streams +
1965 asoc->cnt_on_reasm_queue +
1966 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1967 SCTP_STAT_INCR(sctps_datadropchklmt);
1969 SCTP_STAT_INCR(sctps_datadroprwnd);
1975 if (control == NULL) {
1978 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1983 #ifdef SCTP_ASOCLOG_OF_TSNS
1984 SCTP_TCB_LOCK_ASSERT(stcb);
1985 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1986 asoc->tsn_in_at = 0;
1987 asoc->tsn_in_wrapped = 1;
1989 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1990 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1991 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1992 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1993 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1994 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1995 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1996 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2000 * Before we continue lets validate that we are not being fooled by
2001 * an evil attacker. We can only have Nk chunks based on our TSN
2002 * spread allowed by the mapping array N * 8 bits, so there is no
2003 * way our stream sequence numbers could have wrapped. We of course
2004 * only validate the FIRST fragment so the bit must be set.
2006 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2007 (TAILQ_EMPTY(&asoc->resetHead)) &&
2008 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2009 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2010 /* The incoming sseq is behind where we last delivered? */
2011 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2012 mid, asoc->strmin[sid].last_mid_delivered);
2014 if (asoc->idata_supported) {
2015 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2016 asoc->strmin[sid].last_mid_delivered,
2021 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2022 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2027 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2028 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2029 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2033 if (chk_type == SCTP_IDATA) {
2034 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2036 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2038 if (last_chunk == 0) {
2039 if (chk_type == SCTP_IDATA) {
2040 dmbuf = SCTP_M_COPYM(*m,
2041 (offset + sizeof(struct sctp_idata_chunk)),
2044 dmbuf = SCTP_M_COPYM(*m,
2045 (offset + sizeof(struct sctp_data_chunk)),
2048 #ifdef SCTP_MBUF_LOGGING
2049 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2050 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2054 /* We can steal the last chunk */
2058 /* lop off the top part */
2059 if (chk_type == SCTP_IDATA) {
2060 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2062 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2064 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2065 l_len = SCTP_BUF_LEN(dmbuf);
2068 * need to count up the size hopefully does not hit
2074 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2075 l_len += SCTP_BUF_LEN(lat);
2078 if (l_len > the_len) {
2079 /* Trim the end round bytes off too */
2080 m_adj(dmbuf, -(l_len - the_len));
2083 if (dmbuf == NULL) {
2084 SCTP_STAT_INCR(sctps_nomem);
2088 * Now no matter what, we need a control, get one if we don't have
2089 * one (we may have gotten it above when we found the message was
2092 if (control == NULL) {
2093 sctp_alloc_a_readq(stcb, control);
2094 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2099 if (control == NULL) {
2100 SCTP_STAT_INCR(sctps_nomem);
2103 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2106 control->data = dmbuf;
2107 for (mm = control->data; mm; mm = mm->m_next) {
2108 control->length += SCTP_BUF_LEN(mm);
2110 control->tail_mbuf = NULL;
2111 control->end_added = 1;
2112 control->last_frag_seen = 1;
2113 control->first_frag_seen = 1;
2114 control->fsn_included = fsn;
2115 control->top_fsn = fsn;
2117 created_control = 1;
2119 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2120 chk_flags, ordered, mid, control);
2121 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2122 TAILQ_EMPTY(&asoc->resetHead) &&
2124 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2125 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2126 /* Candidate for express delivery */
2128 * Its not fragmented, No PD-API is up, Nothing in the
2129 * delivery queue, Its un-ordered OR ordered and the next to
2130 * deliver AND nothing else is stuck on the stream queue,
2131 * And there is room for it in the socket buffer. Lets just
2132 * stuff it up the buffer....
2134 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2135 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2136 asoc->highest_tsn_inside_nr_map = tsn;
2138 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2141 sctp_add_to_readq(stcb->sctp_ep, stcb,
2142 control, &stcb->sctp_socket->so_rcv,
2143 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2145 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2146 /* for ordered, bump what we delivered */
2147 asoc->strmin[sid].last_mid_delivered++;
2149 SCTP_STAT_INCR(sctps_recvexpress);
2150 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2151 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2152 SCTP_STR_LOG_FROM_EXPRS_DEL);
2155 goto finish_express_del;
2158 /* Now will we need a chunk too? */
2159 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2160 sctp_alloc_a_chunk(stcb, chk);
2162 /* No memory so we drop the chunk */
2163 SCTP_STAT_INCR(sctps_nomem);
2164 if (last_chunk == 0) {
2165 /* we copied it, free the copy */
2166 sctp_m_freem(dmbuf);
2170 chk->rec.data.tsn = tsn;
2171 chk->no_fr_allowed = 0;
2172 chk->rec.data.fsn = fsn;
2173 chk->rec.data.mid = mid;
2174 chk->rec.data.sid = sid;
2175 chk->rec.data.ppid = ppid;
2176 chk->rec.data.context = stcb->asoc.context;
2177 chk->rec.data.doing_fast_retransmit = 0;
2178 chk->rec.data.rcv_flags = chk_flags;
2180 chk->send_size = the_len;
2182 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2185 atomic_add_int(&net->ref_count, 1);
2188 /* Set the appropriate TSN mark */
2189 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2190 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2191 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2192 asoc->highest_tsn_inside_nr_map = tsn;
2195 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2196 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2197 asoc->highest_tsn_inside_map = tsn;
2200 /* Now is it complete (i.e. not fragmented)? */
2201 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2203 * Special check for when streams are resetting. We could be
2204 * more smart about this and check the actual stream to see
2205 * if it is not being reset.. that way we would not create a
2206 * HOLB when amongst streams being reset and those not being
2210 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2211 SCTP_TSN_GT(tsn, liste->tsn)) {
2213 * yep its past where we need to reset... go ahead
2216 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2218 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2220 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2221 unsigned char inserted = 0;
2223 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2224 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2229 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2234 if (inserted == 0) {
2236 * must be put at end, use prevP
2237 * (all setup from loop) to setup
2240 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2243 goto finish_express_del;
2245 if (chk_flags & SCTP_DATA_UNORDERED) {
2246 /* queue directly into socket buffer */
2247 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2249 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2250 sctp_add_to_readq(stcb->sctp_ep, stcb,
2252 &stcb->sctp_socket->so_rcv, 1,
2253 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2256 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2258 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2266 goto finish_express_del;
2268 /* If we reach here its a reassembly */
2269 need_reasm_check = 1;
2270 SCTPDBG(SCTP_DEBUG_XXX,
2271 "Queue data to stream for reasm control: %p MID: %u\n",
2273 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2276 * the assoc is now gone and chk was put onto the reasm
2277 * queue, which has all been freed.
2285 /* Here we tidy up things */
2286 if (tsn == (asoc->cumulative_tsn + 1)) {
2287 /* Update cum-ack */
2288 asoc->cumulative_tsn = tsn;
2294 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2296 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2298 SCTP_STAT_INCR(sctps_recvdata);
2299 /* Set it present please */
2300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2301 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2303 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2304 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2305 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2307 if (need_reasm_check) {
2308 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2309 need_reasm_check = 0;
2311 /* check the special flag for stream resets */
2312 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2313 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2315 * we have finished working through the backlogged TSN's now
2316 * time to reset streams. 1: call reset function. 2: free
2317 * pending_reply space 3: distribute any chunks in
2318 * pending_reply_queue.
2320 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2321 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2322 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2323 SCTP_FREE(liste, SCTP_M_STRESET);
2324 /* sa_ignore FREED_MEMORY */
2325 liste = TAILQ_FIRST(&asoc->resetHead);
2326 if (TAILQ_EMPTY(&asoc->resetHead)) {
2327 /* All can be removed */
2328 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2329 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2330 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2334 if (need_reasm_check) {
2335 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2336 need_reasm_check = 0;
2340 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2341 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2345 * if control->sinfo_tsn is <= liste->tsn we
2346 * can process it which is the NOT of
2347 * control->sinfo_tsn > liste->tsn
2349 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2350 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2354 if (need_reasm_check) {
2355 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2356 need_reasm_check = 0;
/*
 * Lookup table indexed by one byte of the OR of the mapping and
 * nr_mapping arrays.  Entry [v] is the number of consecutive 1-bits in
 * v counting from the least-significant bit (0 when bit 0 is clear,
 * 8 only for 0xff).  sctp_slide_mapping_arrays() uses it ("at +=
 * sctp_map_lookup_tab[val]") to count how many in-sequence TSNs are
 * represented by a partially filled byte of the map.
 * NOTE(review): the closing "};" of this table falls in a gap of this
 * excerpt (embedded line numbering is not contiguous).
 */
2364 static const int8_t sctp_map_lookup_tab[256] = {
2365 0, 1, 0, 2, 0, 1, 0, 3,
2366 0, 1, 0, 2, 0, 1, 0, 4,
2367 0, 1, 0, 2, 0, 1, 0, 3,
2368 0, 1, 0, 2, 0, 1, 0, 5,
2369 0, 1, 0, 2, 0, 1, 0, 3,
2370 0, 1, 0, 2, 0, 1, 0, 4,
2371 0, 1, 0, 2, 0, 1, 0, 3,
2372 0, 1, 0, 2, 0, 1, 0, 6,
2373 0, 1, 0, 2, 0, 1, 0, 3,
2374 0, 1, 0, 2, 0, 1, 0, 4,
2375 0, 1, 0, 2, 0, 1, 0, 3,
2376 0, 1, 0, 2, 0, 1, 0, 5,
2377 0, 1, 0, 2, 0, 1, 0, 3,
2378 0, 1, 0, 2, 0, 1, 0, 4,
2379 0, 1, 0, 2, 0, 1, 0, 3,
2380 0, 1, 0, 2, 0, 1, 0, 7,
2381 0, 1, 0, 2, 0, 1, 0, 3,
2382 0, 1, 0, 2, 0, 1, 0, 4,
2383 0, 1, 0, 2, 0, 1, 0, 3,
2384 0, 1, 0, 2, 0, 1, 0, 5,
2385 0, 1, 0, 2, 0, 1, 0, 3,
2386 0, 1, 0, 2, 0, 1, 0, 4,
2387 0, 1, 0, 2, 0, 1, 0, 3,
2388 0, 1, 0, 2, 0, 1, 0, 6,
2389 0, 1, 0, 2, 0, 1, 0, 3,
2390 0, 1, 0, 2, 0, 1, 0, 4,
2391 0, 1, 0, 2, 0, 1, 0, 3,
2392 0, 1, 0, 2, 0, 1, 0, 5,
2393 0, 1, 0, 2, 0, 1, 0, 3,
2394 0, 1, 0, 2, 0, 1, 0, 4,
2395 0, 1, 0, 2, 0, 1, 0, 3,
2396 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() -- recompute the cumulative TSN from the
 * OR of mapping_array and nr_mapping_array and, once at least 8
 * in-sequence TSNs (one full byte) are covered, either clear both
 * arrays outright or slide their contents down so that
 * mapping_array_base_tsn advances.
 *
 * NOTE(review): this excerpt has gaps (the embedded line numbers are
 * not contiguous); declarations and braces between the visible
 * statements are missing from this view.
 */
2401 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2404 * Now we also need to check the mapping array in a couple of ways.
2405 * 1) Did we move the cum-ack point?
2407 * When you first glance at this you might think that all entries
2408 * that make up the position of the cum-ack would be in the
2409 * nr-mapping array only.. i.e. things up to the cum-ack are always
2410 * deliverable. Thats true with one exception, when its a fragmented
2411 * message we may not deliver the data until some threshold (or all
2412 * of it) is in place. So we must OR the nr_mapping_array and
2413 * mapping_array to get a true picture of the cum-ack.
2415 struct sctp_association *asoc;
2418 int slide_from, slide_end, lgap, distance;
2419 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state; used only for MAP logging below. */
2423 old_cumack = asoc->cumulative_tsn;
2424 old_base = asoc->mapping_array_base_tsn;
2425 old_highest = asoc->highest_tsn_inside_map;
2427 * We could probably improve this a small bit by calculating the
2428 * offset of the current cum-ack as the starting point.
/*
 * Scan byte-wise over the OR of both arrays; the first byte that is
 * not all-ones ends the run, and sctp_map_lookup_tab counts the
 * consecutive low-order 1-bits in that final byte.
 */
2431 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2432 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2436 /* there is a 0 bit */
2437 at += sctp_map_lookup_tab[val];
/* 'at' = number of in-sequence TSNs counted from the array base. */
2441 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: the cum-ack must not pass BOTH highest-TSN trackers. */
2443 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2444 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2446 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2447 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2449 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2450 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2451 sctp_print_mapping_array(asoc);
2452 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2453 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery: clamp both trackers to the computed cum-ack. */
2455 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2456 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2459 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2460 highest_tsn = asoc->highest_tsn_inside_nr_map;
2462 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Everything up to the highest known TSN is in sequence: wipe the
 * arrays and restart the base just past the cum-ack.
 */
2464 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2465 /* The complete array was completed by a single FR */
2466 /* highest becomes the cum-ack */
2472 /* clear the array */
2473 clr = ((at + 7) >> 3);
2474 if (clr > asoc->mapping_array_size) {
2475 clr = asoc->mapping_array_size;
2477 memset(asoc->mapping_array, 0, clr);
2478 memset(asoc->nr_mapping_array, 0, clr);
/* Debug-style verification that the clear really emptied the maps. */
2480 for (i = 0; i < asoc->mapping_array_size; i++) {
2481 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2482 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2483 sctp_print_mapping_array(asoc);
2487 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2488 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2489 } else if (at >= 8) {
2490 /* we can slide the mapping array down */
2491 /* slide_from holds where we hit the first NON 0xff byte */
2494 * now calculate the ceiling of the move using our highest
/* lgap = distance (in TSNs) from the array base to highest_tsn. */
2497 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2498 slide_end = (lgap >> 3);
2499 if (slide_end < slide_from) {
2500 sctp_print_mapping_array(asoc);
2502 panic("impossible slide");
2504 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2505 lgap, slide_end, slide_from, at);
2509 if (slide_end > asoc->mapping_array_size) {
2511 panic("would overrun buffer");
2513 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2514 asoc->mapping_array_size, slide_end);
2515 slide_end = asoc->mapping_array_size;
2518 distance = (slide_end - slide_from) + 1;
2519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2520 sctp_log_map(old_base, old_cumack, old_highest,
2521 SCTP_MAP_PREPARE_SLIDE);
2522 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2523 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
/*
 * If the live bytes would not fit after the move, skip the slide
 * entirely rather than truncating state; a later call can retry.
 */
2525 if (distance + slide_from > asoc->mapping_array_size ||
2528 * Here we do NOT slide forward the array so that
2529 * hopefully when more data comes in to fill it up
2530 * we will be able to slide it forward. Really I
2531 * don't think this should happen :-0
2534 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2535 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2536 (uint32_t)asoc->mapping_array_size,
2537 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down by slide_from, then zero the tail. */
2542 for (ii = 0; ii < distance; ii++) {
2543 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2544 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2547 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2548 asoc->mapping_array[ii] = 0;
2549 asoc->nr_mapping_array[ii] = 0;
/* Preserve the "empty map" invariant (highest == base - 1). */
2551 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2552 asoc->highest_tsn_inside_map += (slide_from << 3);
2554 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2555 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
/* Base advances by 8 TSNs per slid byte. */
2557 asoc->mapping_array_base_tsn += (slide_from << 3);
2558 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2559 sctp_log_map(asoc->mapping_array_base_tsn,
2560 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2561 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() -- after sliding the mapping arrays, decide
 * whether to emit a SACK immediately or (re)start the delayed-ack
 * timer.  'was_a_gap' tells us whether a gap existed before the
 * packet currently being processed was absorbed.
 *
 * NOTE(review): excerpt has gaps (embedded numbering is not
 * contiguous); braces/else-arms between visible statements are
 * missing from this view.
 */
2568 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2570 struct sctp_association *asoc;
2571 uint32_t highest_tsn;
2574 sctp_slide_mapping_arrays(stcb);
/* highest_tsn = max of the renege-able and non-renege-able maps. */
2576 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2577 highest_tsn = asoc->highest_tsn_inside_nr_map;
2579 highest_tsn = asoc->highest_tsn_inside_map;
2581 /* Is there a gap now? */
2582 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2585 * Now we need to see if we need to queue a sack or just start the
2586 * timer (if allowed).
2588 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2590 * Ok special case, in SHUTDOWN-SENT case. here we maker
2591 * sure SACK timer is off and instead send a SHUTDOWN and a
2594 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2595 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2596 stcb->sctp_ep, stcb, NULL,
2597 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* Resend SHUTDOWN to the alternate path if set, else the primary. */
2599 sctp_send_shutdown(stcb,
2600 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2602 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2606 * CMT DAC algorithm: increase number of packets received
2609 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any one of these conditions calls for an immediate SACK. */
2611 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2613 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2615 (stcb->asoc.numduptsns) || /* we have dup's */
2616 (is_a_gap) || /* is still a gap */
2617 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2618 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/* CMT DAC exception: under these conditions delay the ack instead. */
2621 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2622 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2623 (stcb->asoc.send_sack == 0) &&
2624 (stcb->asoc.numduptsns == 0) &&
2625 (stcb->asoc.delayed_ack) &&
2626 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2629 * CMT DAC algorithm: With CMT, delay acks
2630 * even in the face of
2632 * reordering. Therefore, if acks that do
2633 * not have to be sent because of the above
2634 * reasons, will be delayed. That is, acks
2635 * that would have been sent due to gap
2636 * reports will be delayed with DAC. Start
2637 * the delayed ack timer.
2639 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2640 stcb->sctp_ep, stcb, NULL);
2643 * Ok we must build a SACK since the timer
2644 * is pending, we got our first packet OR
2645 * there are gaps or duplicates.
2647 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2648 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise just make sure the delayed-ack timer is running. */
2651 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2652 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2653 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_process_data() -- walk the chunks of a received packet starting
 * at *offset: validate each DATA/I-DATA chunk (length, and agreement
 * with the negotiated idata_supported mode), hand valid ones to
 * sctp_process_a_data_chunk(), abort the association on protocol
 * violations, apply the unrecognized-chunk bit rules (0x40: report,
 * 0x80: skip) to anything else, then update liveness stats and call
 * sctp_sack_check().
 *
 * NOTE(review): this excerpt has gaps (the embedded line numbers are
 * not contiguous); braces, else-arms and some declarations between
 * the visible statements are missing from this view.
 *
 * Fix applied: the two protocol-violation messages for a DATA/I-DATA
 * mode mismatch were swapped relative to the conditions guarding them.
 * idata_supported == 1 with chunk_type == SCTP_DATA means a plain DATA
 * chunk arrived although I-DATA was negotiated, and vice versa; the
 * message text now matches each condition.
 */
2660 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2661 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2662 struct sctp_nets *net, uint32_t *high_tsn)
2664 struct sctp_chunkhdr *ch, chunk_buf;
2665 struct sctp_association *asoc;
2666 int num_chunks = 0; /* number of control chunks processed */
2668 int break_flag, last_chunk;
2669 int abort_flag = 0, was_a_gap;
2671 uint32_t highest_tsn;
2672 uint16_t chk_length;
2675 sctp_set_rwnd(stcb, &stcb->asoc);
2678 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed before this packet is absorbed. */
2680 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2681 highest_tsn = asoc->highest_tsn_inside_nr_map;
2683 highest_tsn = asoc->highest_tsn_inside_map;
2685 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2687 * setup where we got the last DATA packet from for any SACK that
2688 * may need to go out. Don't bump the net. This is done ONLY when a
2689 * chunk is assigned.
2691 asoc->last_data_chunk_from = net;
2694 * Now before we proceed we must figure out if this is a wasted
2695 * cluster... i.e. it is a small packet sent in and yet the driver
2696 * underneath allocated a full cluster for it. If so we must copy it
2697 * to a smaller mbuf and free up the cluster mbuf. This will help
2698 * with cluster starvation. Note for __Panda__ we don't do this
2699 * since it has clusters all the way down to 64 bytes.
2701 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2702 /* we only handle mbufs that are singletons.. not chains */
2703 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2705 /* ok lets see if we can copy the data up */
2708 /* get the pointers and copy */
2709 to = mtod(m, caddr_t *);
2710 from = mtod((*mm), caddr_t *);
2711 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2712 /* copy the length and free up the old */
2713 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2715 /* success, back copy */
2718 /* We are in trouble in the mbuf world .. yikes */
2722 /* get pointer to the first chunk header */
2723 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2724 sizeof(struct sctp_chunkhdr),
2725 (uint8_t *)&chunk_buf);
2730 * process all DATA chunks...
2732 *high_tsn = asoc->cumulative_tsn;
2734 asoc->data_pkts_seen++;
2735 while (stop_proc == 0) {
2736 /* validate chunk length */
2737 chk_length = ntohs(ch->chunk_length);
2738 if (length - *offset < chk_length) {
2739 /* all done, mutulated chunk */
/* I-DATA negotiated but a plain DATA chunk arrived: abort. */
2743 if ((asoc->idata_supported == 1) &&
2744 (ch->chunk_type == SCTP_DATA)) {
2745 struct mbuf *op_err;
2746 char msg[SCTP_DIAG_INFO_LEN];
2748 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2749 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2750 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2751 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* DATA negotiated but an I-DATA chunk arrived: abort. */
2754 if ((asoc->idata_supported == 0) &&
2755 (ch->chunk_type == SCTP_IDATA)) {
2756 struct mbuf *op_err;
2757 char msg[SCTP_DIAG_INFO_LEN];
2759 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2760 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2761 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2762 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2765 if ((ch->chunk_type == SCTP_DATA) ||
2766 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum chunk length depends on DATA vs I-DATA header size. */
2769 if (ch->chunk_type == SCTP_DATA) {
2770 clen = sizeof(struct sctp_data_chunk);
2772 clen = sizeof(struct sctp_idata_chunk);
2774 if (chk_length < clen) {
2776 * Need to send an abort since we had a
2777 * invalid data chunk.
2779 struct mbuf *op_err;
2780 char msg[SCTP_DIAG_INFO_LEN];
2782 snprintf(msg, sizeof(msg), "%s chunk of length %u",
2783 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2785 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2786 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2787 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2790 #ifdef SCTP_AUDITING_ENABLED
2791 sctp_audit_log(0xB1, 0);
2793 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2798 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2799 chk_length, net, high_tsn, &abort_flag, &break_flag,
2800 last_chunk, ch->chunk_type)) {
2808 * Set because of out of rwnd space and no
2809 * drop rep space left.
2815 /* not a data chunk in the data region */
2816 switch (ch->chunk_type) {
2817 case SCTP_INITIATION:
2818 case SCTP_INITIATION_ACK:
2819 case SCTP_SELECTIVE_ACK:
2820 case SCTP_NR_SELECTIVE_ACK:
2821 case SCTP_HEARTBEAT_REQUEST:
2822 case SCTP_HEARTBEAT_ACK:
2823 case SCTP_ABORT_ASSOCIATION:
2825 case SCTP_SHUTDOWN_ACK:
2826 case SCTP_OPERATION_ERROR:
2827 case SCTP_COOKIE_ECHO:
2828 case SCTP_COOKIE_ACK:
2831 case SCTP_SHUTDOWN_COMPLETE:
2832 case SCTP_AUTHENTICATION:
2833 case SCTP_ASCONF_ACK:
2834 case SCTP_PACKET_DROPPED:
2835 case SCTP_STREAM_RESET:
2836 case SCTP_FORWARD_CUM_TSN:
2840 * Now, what do we do with KNOWN
2841 * chunks that are NOT in the right
2844 * For now, I do nothing but ignore
2845 * them. We may later want to add
2846 * sysctl stuff to switch out and do
2847 * either an ABORT() or possibly
2850 struct mbuf *op_err;
2851 char msg[SCTP_DIAG_INFO_LEN];
2853 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2855 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2856 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2861 * Unknown chunk type: use bit rules after
2864 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2866 * Need to send an abort since we
2867 * had a invalid chunk.
2869 struct mbuf *op_err;
2870 char msg[SCTP_DIAG_INFO_LEN];
2872 snprintf(msg, sizeof(msg), "Chunk of length %u",
2874 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2875 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2876 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Bit 0x40: report the unrecognized chunk in an ERROR cause. */
2879 if (ch->chunk_type & 0x40) {
2880 /* Add a error report to the queue */
2881 struct mbuf *op_err;
2882 struct sctp_gen_error_cause *cause;
2884 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2885 0, M_NOWAIT, 1, MT_DATA);
2886 if (op_err != NULL) {
2887 cause = mtod(op_err, struct sctp_gen_error_cause *);
2888 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2889 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2890 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2891 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2892 if (SCTP_BUF_NEXT(op_err) != NULL) {
2893 sctp_queue_op_err(stcb, op_err);
2895 sctp_m_freem(op_err);
/* Bit 0x80 clear: stop processing the remainder of the packet. */
2899 if ((ch->chunk_type & 0x80) == 0) {
2900 /* discard the rest of this packet */
2902 } /* else skip this bad chunk and
2905 } /* switch of chunk type */
2907 *offset += SCTP_SIZE32(chk_length);
2908 if ((*offset >= length) || stop_proc) {
2909 /* no more data left in the mbuf chain */
2913 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2914 sizeof(struct sctp_chunkhdr),
2915 (uint8_t *)&chunk_buf);
2924 * we need to report rwnd overrun drops.
2926 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2930 * Did we get data, if so update the time for auto-close and
2931 * give peer credit for being alive.
2933 SCTP_STAT_INCR(sctps_recvpktwithdata);
2934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2935 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2936 stcb->asoc.overall_error_count,
2938 SCTP_FROM_SCTP_INDATA,
2941 stcb->asoc.overall_error_count = 0;
2942 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2944 /* now service all of the reassm queue if needed */
2945 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2946 /* Assure that we ack right away */
2947 stcb->asoc.send_sack = 1;
2949 /* Start a sack timer or QUEUE a SACK for sending */
2950 sctp_sack_check(stcb, was_a_gap)
/*
 * sctp_process_segment_range() -- mark the sent-queue chunks covered by
 * one SACK gap-ack block (TSNs last_tsn + frag_strt .. last_tsn +
 * frag_end).  Newly acked chunks leave flight, update CMT (CUCv2/SFR/
 * DAC) per-destination trackers and possibly the RTO; with nr_sacking
 * the chunk's data is also freed and it is marked NR_ACKED.  Returns
 * wake_him, used only for nr-sack (see trailing comment).
 *
 * NOTE(review): excerpt has gaps (embedded numbering is not
 * contiguous); braces/else-arms between visible statements are missing
 * from this view.
 */
2955 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2956 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2958 uint32_t *biggest_newly_acked_tsn,
2959 uint32_t *this_sack_lowest_newack,
2962 struct sctp_tmit_chunk *tp1;
2963 unsigned int theTSN;
2964 int j, wake_him = 0, circled = 0;
2966 /* Recover the tp1 we last saw */
2969 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Walk every TSN inside this gap-ack block. */
2971 for (j = frag_strt; j <= frag_end; j++) {
2972 theTSN = j + last_tsn;
2974 if (tp1->rec.data.doing_fast_retransmit)
2978 * CMT: CUCv2 algorithm. For each TSN being
2979 * processed from the sent queue, track the
2980 * next expected pseudo-cumack, or
2981 * rtx_pseudo_cumack, if required. Separate
2982 * cumack trackers for first transmissions,
2983 * and retransmissions.
2985 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2986 (tp1->whoTo->find_pseudo_cumack == 1) &&
2987 (tp1->snd_count == 1)) {
2988 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2989 tp1->whoTo->find_pseudo_cumack = 0;
2991 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2992 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2993 (tp1->snd_count > 1)) {
2994 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2995 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk for this TSN. */
2997 if (tp1->rec.data.tsn == theTSN) {
2998 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3000 * must be held until
3003 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3005 * If it is less than RESEND, it is
3006 * now no-longer in flight.
3007 * Higher values may already be set
3008 * via previous Gap Ack Blocks...
3009 * i.e. ACKED or RESEND.
3011 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3012 *biggest_newly_acked_tsn)) {
3013 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3016 * CMT: SFR algo (and HTNA) - set
3017 * saw_newack to 1 for dest being
3018 * newly acked. update
3019 * this_sack_highest_newack if
3022 if (tp1->rec.data.chunk_was_revoked == 0)
3023 tp1->whoTo->saw_newack = 1;
3025 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3026 tp1->whoTo->this_sack_highest_newack)) {
3027 tp1->whoTo->this_sack_highest_newack =
3031 * CMT DAC algo: also update
3032 * this_sack_lowest_newack
3034 if (*this_sack_lowest_newack == 0) {
3035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3036 sctp_log_sack(*this_sack_lowest_newack,
3041 SCTP_LOG_TSN_ACKED);
3043 *this_sack_lowest_newack = tp1->rec.data.tsn;
3046 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3047 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3048 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3049 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3050 * Separate pseudo_cumack trackers for first transmissions and
3053 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3054 if (tp1->rec.data.chunk_was_revoked == 0) {
3055 tp1->whoTo->new_pseudo_cumack = 1;
3057 tp1->whoTo->find_pseudo_cumack = 1;
3059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3060 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3062 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3063 if (tp1->rec.data.chunk_was_revoked == 0) {
3064 tp1->whoTo->new_pseudo_cumack = 1;
3066 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3069 sctp_log_sack(*biggest_newly_acked_tsn,
3074 SCTP_LOG_TSN_ACKED);
3076 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3077 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3078 tp1->whoTo->flight_size,
3080 (uint32_t)(uintptr_t)tp1->whoTo,
/* Newly acked: remove from flight and credit the destination. */
3083 sctp_flight_size_decrease(tp1);
3084 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3085 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3088 sctp_total_flight_decrease(stcb, tp1);
3090 tp1->whoTo->net_ack += tp1->send_size;
3091 if (tp1->snd_count < 2) {
3093 * True non-retransmitted chunk
3095 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample from a first-transmission chunk's send time. */
3103 sctp_calculate_rto(stcb,
3106 &tp1->sent_rcv_time,
3107 SCTP_RTT_FROM_DATA);
3110 if (tp1->whoTo->rto_needed == 0) {
3111 tp1->whoTo->rto_needed = 1;
3118 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3119 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3120 stcb->asoc.this_sack_highest_gap)) {
3121 stcb->asoc.this_sack_highest_gap =
3124 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3125 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3126 #ifdef SCTP_AUDITING_ENABLED
3127 sctp_audit_log(0xB2,
3128 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3133 * All chunks NOT UNSENT fall through here and are marked
3134 * (leave PR-SCTP ones that are to skip alone though)
3136 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3137 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3138 tp1->sent = SCTP_DATAGRAM_MARKED;
3140 if (tp1->rec.data.chunk_was_revoked) {
3141 /* deflate the cwnd */
3142 tp1->whoTo->cwnd -= tp1->book_size;
3143 tp1->rec.data.chunk_was_revoked = 0;
3145 /* NR Sack code here */
3147 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3148 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3149 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3152 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* A drained stream with a pending reset triggers the reset. */
3155 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3156 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3157 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3158 stcb->asoc.trigger_reset = 1;
3160 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* NR-acked chunks release their data immediately. */
3166 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3167 sctp_m_freem(tp1->data);
3174 } /* if (tp1->tsn == theTSN) */
3175 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
/* Wrap to the queue head once if the TSN was not found in order. */
3178 tp1 = TAILQ_NEXT(tp1, sctp_next);
3179 if ((tp1 == NULL) && (circled == 0)) {
3181 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3183 } /* end while (tp1) */
3186 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3188 /* In case the fragments were not in order we must reset */
3189 } /* end for (j = fragStart */
3191 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() -- iterate over the num_seg + num_nr_seg
 * gap-ack blocks of a (NR-)SACK at *offset in mbuf m, validate each
 * block (start <= end, in-order vs the previous block), track the
 * biggest TSN acked, and process each block via
 * sctp_process_segment_range().  Returns chunk_freed.
 *
 * NOTE(review): excerpt has gaps (embedded numbering is not
 * contiguous); braces/else-arms between visible statements are
 * missing from this view.
 */
3196 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3197 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3198 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3199 int num_seg, int num_nr_seg, int *rto_ok)
3201 struct sctp_gap_ack_block *frag, block;
3202 struct sctp_tmit_chunk *tp1;
3207 uint16_t frag_strt, frag_end, prev_frag_end;
3209 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3213 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3216 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap-ack block out of the SACK chunk. */
3218 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3219 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3220 *offset += sizeof(block);
3222 return (chunk_freed);
3224 frag_strt = ntohs(frag->start);
3225 frag_end = ntohs(frag->end);
3227 if (frag_strt > frag_end) {
3228 /* This gap report is malformed, skip it. */
3231 if (frag_strt <= prev_frag_end) {
3232 /* This gap report is not in order, so restart. */
3233 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3235 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3236 *biggest_tsn_acked = last_tsn + frag_end;
3243 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3244 non_revocable, &num_frs, biggest_newly_acked_tsn,
3245 this_sack_lowest_newack, rto_ok)) {
3248 prev_frag_end = frag_end;
3250 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3252 sctp_log_fr(*biggest_tsn_acked,
3253 *biggest_newly_acked_tsn,
3254 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3256 return (chunk_freed);
/*
 * sctp_check_for_revoked() -- walk the sent queue for TSNs above the
 * cum-ack but within biggest_tsn_acked.  A chunk still marked ACKED
 * (previously gap-acked but not covered this time) has been revoked:
 * return it to SENT, flag chunk_was_revoked, and put it back in
 * flight (with a compensating cwnd inflation).  A MARKED chunk was
 * re-acked by this SACK and becomes ACKED.
 *
 * NOTE(review): excerpt has gaps (embedded numbering is not
 * contiguous); braces/else-arms between visible statements are
 * missing from this view.
 */
3260 sctp_check_for_revoked(struct sctp_tcb *stcb,
3261 struct sctp_association *asoc, uint32_t cumack,
3262 uint32_t biggest_tsn_acked)
3264 struct sctp_tmit_chunk *tp1;
3266 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3267 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3269 * ok this guy is either ACK or MARKED. If it is
3270 * ACKED it has been previously acked but not this
3271 * time i.e. revoked. If it is MARKED it was ACK'ed
3274 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3277 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3278 /* it has been revoked */
3279 tp1->sent = SCTP_DATAGRAM_SENT;
3280 tp1->rec.data.chunk_was_revoked = 1;
3282 * We must add this stuff back in to assure
3283 * timers and such get started.
3285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3286 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3287 tp1->whoTo->flight_size,
3289 (uint32_t)(uintptr_t)tp1->whoTo,
3292 sctp_flight_size_increase(tp1);
3293 sctp_total_flight_increase(stcb, tp1);
3295 * We inflate the cwnd to compensate for our
3296 * artificial inflation of the flight_size.
3298 tp1->whoTo->cwnd += tp1->book_size;
3299 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3300 sctp_log_sack(asoc->last_acked_seq,
3305 SCTP_LOG_TSN_REVOKED);
3307 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3308 /* it has been re-acked in this SACK */
3309 tp1->sent = SCTP_DATAGRAM_ACKED;
3312 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3319 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3320 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3322 struct sctp_tmit_chunk *tp1;
3323 int strike_flag = 0;
3325 int tot_retrans = 0;
3326 uint32_t sending_seq;
3327 struct sctp_nets *net;
3328 int num_dests_sacked = 0;
3331 * select the sending_seq, this is either the next thing ready to be
3332 * sent but not transmitted, OR, the next seq we assign.
3334 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3336 sending_seq = asoc->sending_seq;
3338 sending_seq = tp1->rec.data.tsn;
3341 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3342 if ((asoc->sctp_cmt_on_off > 0) &&
3343 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3344 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3345 if (net->saw_newack)
3349 if (stcb->asoc.prsctp_supported) {
3350 (void)SCTP_GETTIME_TIMEVAL(&now);
3352 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3354 if (tp1->no_fr_allowed) {
3355 /* this one had a timeout or something */
3358 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3359 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3360 sctp_log_fr(biggest_tsn_newly_acked,
3363 SCTP_FR_LOG_CHECK_STRIKE);
3365 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3366 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3370 if (stcb->asoc.prsctp_supported) {
3371 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3372 /* Is it expired? */
3373 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3374 /* Yes so drop it */
3375 if (tp1->data != NULL) {
3376 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3377 SCTP_SO_NOT_LOCKED);
3384 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3385 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3386 /* we are beyond the tsn in the sack */
3389 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3390 /* either a RESEND, ACKED, or MARKED */
3392 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3393 /* Continue strikin FWD-TSN chunks */
3394 tp1->rec.data.fwd_tsn_cnt++;
3399 * CMT : SFR algo (covers part of DAC and HTNA as well)
3401 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3403 * No new acks were receieved for data sent to this
3404 * dest. Therefore, according to the SFR algo for
3405 * CMT, no data sent to this dest can be marked for
3406 * FR using this SACK.
3409 } else if (tp1->whoTo &&
3410 SCTP_TSN_GT(tp1->rec.data.tsn,
3411 tp1->whoTo->this_sack_highest_newack) &&
3412 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3414 * CMT: New acks were receieved for data sent to
3415 * this dest. But no new acks were seen for data
3416 * sent after tp1. Therefore, according to the SFR
3417 * algo for CMT, tp1 cannot be marked for FR using
3418 * this SACK. This step covers part of the DAC algo
3419 * and the HTNA algo as well.
3424 * Here we check to see if we were have already done a FR
3425 * and if so we see if the biggest TSN we saw in the sack is
3426 * smaller than the recovery point. If so we don't strike
3427 * the tsn... otherwise we CAN strike the TSN.
3430 * @@@ JRI: Check for CMT if (accum_moved &&
3431 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3434 if (accum_moved && asoc->fast_retran_loss_recovery) {
3436 * Strike the TSN if in fast-recovery and cum-ack
3439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3440 sctp_log_fr(biggest_tsn_newly_acked,
3443 SCTP_FR_LOG_STRIKE_CHUNK);
3445 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3448 if ((asoc->sctp_cmt_on_off > 0) &&
3449 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3451 * CMT DAC algorithm: If SACK flag is set to
3452 * 0, then lowest_newack test will not pass
3453 * because it would have been set to the
3454 * cumack earlier. If not already to be
3455 * rtx'd, If not a mixed sack and if tp1 is
3456 * not between two sacked TSNs, then mark by
3457 * one more. NOTE that we are marking by one
3458 * additional time since the SACK DAC flag
3459 * indicates that two packets have been
3460 * received after this missing TSN.
3462 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3463 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3464 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3465 sctp_log_fr(16 + num_dests_sacked,
3468 SCTP_FR_LOG_STRIKE_CHUNK);
3473 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3474 (asoc->sctp_cmt_on_off == 0)) {
3476 * For those that have done a FR we must take
3477 * special consideration if we strike. I.e the
3478 * biggest_newly_acked must be higher than the
3479 * sending_seq at the time we did the FR.
3482 #ifdef SCTP_FR_TO_ALTERNATE
3484 * If FR's go to new networks, then we must only do
3485 * this for singly homed asoc's. However if the FR's
3486 * go to the same network (Armando's work) then its
3487 * ok to FR multiple times.
3495 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3496 tp1->rec.data.fast_retran_tsn)) {
3498 * Strike the TSN, since this ack is
3499 * beyond where things were when we
3502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3503 sctp_log_fr(biggest_tsn_newly_acked,
3506 SCTP_FR_LOG_STRIKE_CHUNK);
3508 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3512 if ((asoc->sctp_cmt_on_off > 0) &&
3513 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3515 * CMT DAC algorithm: If
3516 * SACK flag is set to 0,
3517 * then lowest_newack test
3518 * will not pass because it
3519 * would have been set to
3520 * the cumack earlier. If
3521 * not already to be rtx'd,
3522 * If not a mixed sack and
3523 * if tp1 is not between two
3524 * sacked TSNs, then mark by
3525 * one more. NOTE that we
3526 * are marking by one
3527 * additional time since the
3528 * SACK DAC flag indicates
3529 * that two packets have
3530 * been received after this
3533 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3534 (num_dests_sacked == 1) &&
3535 SCTP_TSN_GT(this_sack_lowest_newack,
3536 tp1->rec.data.tsn)) {
3537 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3538 sctp_log_fr(32 + num_dests_sacked,
3541 SCTP_FR_LOG_STRIKE_CHUNK);
3543 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3551 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3554 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3555 biggest_tsn_newly_acked)) {
3557 * We don't strike these: This is the HTNA
3558 * algorithm i.e. we don't strike If our TSN is
3559 * larger than the Highest TSN Newly Acked.
3563 /* Strike the TSN */
3564 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3565 sctp_log_fr(biggest_tsn_newly_acked,
3568 SCTP_FR_LOG_STRIKE_CHUNK);
3570 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3573 if ((asoc->sctp_cmt_on_off > 0) &&
3574 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3576 * CMT DAC algorithm: If SACK flag is set to
3577 * 0, then lowest_newack test will not pass
3578 * because it would have been set to the
3579 * cumack earlier. If not already to be
3580 * rtx'd, If not a mixed sack and if tp1 is
3581 * not between two sacked TSNs, then mark by
3582 * one more. NOTE that we are marking by one
3583 * additional time since the SACK DAC flag
3584 * indicates that two packets have been
3585 * received after this missing TSN.
3587 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3588 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3589 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3590 sctp_log_fr(48 + num_dests_sacked,
3593 SCTP_FR_LOG_STRIKE_CHUNK);
3599 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3600 struct sctp_nets *alt;
3602 /* fix counts and things */
3603 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3604 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3605 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3607 (uint32_t)(uintptr_t)tp1->whoTo,
3611 tp1->whoTo->net_ack++;
3612 sctp_flight_size_decrease(tp1);
3613 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3614 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3619 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3620 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3621 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3623 /* add back to the rwnd */
3624 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3626 /* remove from the total flight */
3627 sctp_total_flight_decrease(stcb, tp1);
3629 if ((stcb->asoc.prsctp_supported) &&
3630 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3632 * Has it been retransmitted tv_sec times? -
3633 * we store the retran count there.
3635 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3636 /* Yes, so drop it */
3637 if (tp1->data != NULL) {
3638 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3639 SCTP_SO_NOT_LOCKED);
3641 /* Make sure to flag we had a FR */
3642 if (tp1->whoTo != NULL) {
3643 tp1->whoTo->net_ack++;
3649 * SCTP_PRINTF("OK, we are now ready to FR this
3652 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3653 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3657 /* This is a subsequent FR */
3658 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3660 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3661 if (asoc->sctp_cmt_on_off > 0) {
3663 * CMT: Using RTX_SSTHRESH policy for CMT.
3664 * If CMT is being used, then pick dest with
3665 * largest ssthresh for any retransmission.
3667 tp1->no_fr_allowed = 1;
3669 /* sa_ignore NO_NULL_CHK */
3670 if (asoc->sctp_cmt_pf > 0) {
3672 * JRS 5/18/07 - If CMT PF is on,
3673 * use the PF version of
3676 alt = sctp_find_alternate_net(stcb, alt, 2);
3679 * JRS 5/18/07 - If only CMT is on,
3680 * use the CMT version of
3683 /* sa_ignore NO_NULL_CHK */
3684 alt = sctp_find_alternate_net(stcb, alt, 1);
3690 * CUCv2: If a different dest is picked for
3691 * the retransmission, then new
3692 * (rtx-)pseudo_cumack needs to be tracked
3693 * for orig dest. Let CUCv2 track new (rtx-)
3694 * pseudo-cumack always.
3697 tp1->whoTo->find_pseudo_cumack = 1;
3698 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3701 } else { /* CMT is OFF */
3703 #ifdef SCTP_FR_TO_ALTERNATE
3704 /* Can we find an alternate? */
3705 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3708 * default behavior is to NOT retransmit
3709 * FR's to an alternate. Armando Caro's
3710 * paper details why.
3716 tp1->rec.data.doing_fast_retransmit = 1;
3718 /* mark the sending seq for possible subsequent FR's */
3720 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3721 * (uint32_t)tpi->rec.data.tsn);
3723 if (TAILQ_EMPTY(&asoc->send_queue)) {
3725 * If the queue of send is empty then its
3726 * the next sequence number that will be
3727 * assigned so we subtract one from this to
3728 * get the one we last sent.
3730 tp1->rec.data.fast_retran_tsn = sending_seq;
3733 * If there are chunks on the send queue
3734 * (unsent data that has made it from the
3735 * stream queues but not out the door, we
3736 * take the first one (which will have the
3737 * lowest TSN) and subtract one to get the
3740 struct sctp_tmit_chunk *ttt;
3742 ttt = TAILQ_FIRST(&asoc->send_queue);
3743 tp1->rec.data.fast_retran_tsn =
3749 * this guy had a RTO calculation pending on
3752 if ((tp1->whoTo != NULL) &&
3753 (tp1->whoTo->rto_needed == 0)) {
3754 tp1->whoTo->rto_needed = 1;
3758 if (alt != tp1->whoTo) {
3759 /* yes, there is an alternate. */
3760 sctp_free_remote_addr(tp1->whoTo);
3761 /* sa_ignore FREED_MEMORY */
3763 atomic_add_int(&alt->ref_count, 1);
/*
 * Try to advance the PR-SCTP "Advanced Peer Ack Point" (RFC 3758) over
 * chunks at the head of the sent_queue that are marked FORWARD_TSN_SKIP or
 * NR-ACKED.  Returns the last chunk the ack point advanced past (so the
 * caller can build/retransmit a FORWARD-TSN), or NULL if no advance was
 * possible.
 *
 * NOTE(review): this excerpt is missing interior lines (braces, the
 * timetodrop comparison context, the final return path); comments below
 * describe only what is visible here — verify against the full file.
 */
3769 struct sctp_tmit_chunk *
3770 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3771 struct sctp_association *asoc)
3773 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* PR-SCTP off => nothing to advance. */
3777 	if (asoc->prsctp_supported == 0) {
/* Walk in TSN order; _SAFE because chunks may be released mid-walk. */
3780 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3781 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3782 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3783 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3784 			/* no chance to advance, out of here */
3787 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3788 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3789 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3790 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3791 				    asoc->advanced_peer_ack_point,
3792 				    tp1->rec.data.tsn, 0, 0);
/* Reliable (non-PR) chunks form a hard stop for the ack point. */
3795 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3797 			 * We can't fwd-tsn past any that are reliable aka
3798 			 * retransmitted until the asoc fails.
/* Timestamp used for the TTL-expiry comparison below. */
3803 			(void)SCTP_GETTIME_TIMEVAL(&now);
3807 		 * now we got a chunk which is marked for another
3808 		 * retransmission to a PR-stream but has run out its chances
3809 		 * already maybe OR has been marked to skip now. Can we skip
3810 		 * it if its a resend?
3812 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3813 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3815 			 * Now is this one marked for resend and its time is
/* TTL expired: abandon the chunk so the ack point can pass it. */
3818 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3819 				/* Yes so drop it */
3821 				(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3822 				    1, SCTP_SO_NOT_LOCKED);
3826 				 * No, we are done when hit one for resend
3827 				 * whos time as not expired.
3833 		 * Ok now if this chunk is marked to drop it we can clean up
3834 		 * the chunk, advance our peer ack point and we can check
3837 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3838 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3839 			/* advance PeerAckPoint goes forward */
3840 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3841 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3843 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3844 				/* No update but we do save the chk */
3849 			 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping: walk sent_queue and
 * count chunks per send-state, then compare against the totals recorded in
 * the asoc.  Under INVARIANTS a mismatch panics; otherwise it is logged.
 * Callers (the SACK handlers) use the result to decide whether to rebuild
 * flight counters from scratch.
 *
 * NOTE(review): excerpt is missing the return-type line, the INVARIANTS
 * #ifdef structure and the return statements — confirm in the full file.
 */
3859 sctp_fs_audit(struct sctp_association *asoc)
3861 	struct sctp_tmit_chunk *chk;
/* Per-state tallies accumulated over the sent_queue walk. */
3862 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3865 	int entry_flight, entry_cnt;
/* Snapshot totals on entry so the log shows what was claimed. */
3870 	entry_flight = asoc->total_flight;
3871 	entry_cnt = asoc->total_flight_count;
/* All (or more) chunks are PR-SCTP-abandoned: counters may legitimately disagree. */
3873 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3876 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3877 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3878 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3883 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3885 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3887 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in-flight or in-between means the books are wrong. */
3894 	if ((inflight > 0) || (inbetween > 0)) {
3896 		panic("Flight size-express incorrect? \n");
3898 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3899 		    entry_flight, entry_cnt);
3901 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3902 		    inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe once the peer's
 * rwnd has reopened: clear its window_probe flag, remove it from the
 * flight accounting and mark it SCTP_DATAGRAM_RESEND so it goes out again
 * as normal data.  Chunks already ACKed (or with no data left) are only
 * logged and left alone.
 *
 * NOTE(review): several argument lines of the sctp_misc_ints() calls and
 * the early-return path are missing from this excerpt.
 */
3911 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3912     struct sctp_association *asoc,
3913     struct sctp_tmit_chunk *tp1)
3915 	tp1->window_probe = 0;
3916 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3917 		/* TSN's skipped we do NOT move back. */
3918 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3919 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3921 		    (uint32_t)(uintptr_t)tp1->whoTo,
3925 	/* First setup this by shrinking flight */
/* Give the CC module a chance to account for the removed TSN. */
3926 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3927 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3930 	sctp_flight_size_decrease(tp1);
3931 	sctp_total_flight_decrease(stcb, tp1);
3932 	/* Now mark for resend */
3933 	tp1->sent = SCTP_DATAGRAM_RESEND;
3934 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3936 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3937 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3938 		    tp1->whoTo->flight_size,
3940 		    (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * Fast-path handler for a SACK that contains only a cumulative ack (no
 * gap-ack blocks): free everything at or below 'cumack' from the
 * sent_queue, update the peer rwnd, drive congestion control, window-probe
 * recovery, shutdown progression and PR-SCTP FORWARD-TSN generation.
 * Sets *abort_now and sends an ABORT on a protocol violation (cum-ack for
 * a TSN never sent).  'ecne_seen' suppresses the CC after-sack update.
 *
 * NOTE(review): this excerpt is heavily sampled — many braces, #else/#endif
 * arms and argument lines are missing.  Comments describe visible logic
 * only; the exact control structure must be confirmed in the full file.
 */
3946 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3947     uint32_t rwnd, int *abort_now, int ecne_seen)
3949 	struct sctp_nets *net;
3950 	struct sctp_association *asoc;
3951 	struct sctp_tmit_chunk *tp1, *tp2;
3953 	int win_probe_recovery = 0;
3954 	int win_probe_recovered = 0;
3955 	int j, done_once = 0;
3959 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3960 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3961 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3963 	SCTP_TCB_LOCK_ASSERT(stcb);
3964 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Debug ring buffer of received cum-acks. */
3965 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3966 	stcb->asoc.cumack_log_at++;
3967 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3968 		stcb->asoc.cumack_log_at = 0;
3972 	old_rwnd = asoc->peers_rwnd;
/* Old (behind) cum-ack: ignore.  Duplicate cum-ack: rwnd update only. */
3973 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3976 	} else if (asoc->last_acked_seq == cumack) {
3977 		/* Window update sack */
3978 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3979 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3980 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3981 			/* SWS sender side engages */
3982 			asoc->peers_rwnd = 0;
3984 		if (asoc->peers_rwnd > old_rwnd) {
3990 	/* First setup for CC stuff */
3991 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3992 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3993 			/* Drag along the window_tsn for cwr's */
3994 			net->cwr_window_tsn = cumack;
3996 		net->prev_cwnd = net->cwnd;
4001 		 * CMT: Reset CUC and Fast recovery algo variables before
4004 		net->new_pseudo_cumack = 0;
4005 		net->will_exit_fast_recovery = 0;
4006 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4007 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity: the peer may not ack a TSN we have not yet sent. */
4010 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4011 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4012 		    sctpchunk_listhead);
4013 		send_s = tp1->rec.data.tsn + 1;
4015 		send_s = asoc->sending_seq;
4017 	if (SCTP_TSN_GE(cumack, send_s)) {
4018 		struct mbuf *op_err;
4019 		char msg[SCTP_DIAG_INFO_LEN];
4023 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4025 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4026 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4027 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4030 	asoc->this_sack_highest_gap = cumack;
4031 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4032 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4033 		    stcb->asoc.overall_error_count,
4035 		    SCTP_FROM_SCTP_INDATA,
/* Any forward movement of the cum-ack clears the error counter. */
4038 	stcb->asoc.overall_error_count = 0;
4039 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4040 		/* process the new consecutive TSN first */
4041 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4042 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4043 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4044 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4046 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4048 					 * If it is less than ACKED, it is
4049 					 * now no-longer in flight. Higher
4050 					 * values may occur during marking
4052 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4053 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4054 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4055 							    tp1->whoTo->flight_size,
4057 							    (uint32_t)(uintptr_t)tp1->whoTo,
4060 						sctp_flight_size_decrease(tp1);
4061 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4062 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4065 						/* sa_ignore NO_NULL_CHK */
4066 						sctp_total_flight_decrease(stcb, tp1);
4068 					tp1->whoTo->net_ack += tp1->send_size;
4069 					if (tp1->snd_count < 2) {
4071 						 * True non-retransmitted
/* Unambiguous ack (never retransmitted): usable for RTT measurement. */
4074 						tp1->whoTo->net_ack2 +=
4077 						/* update RTO too? */
4085 							sctp_calculate_rto(stcb,
4087 							    &tp1->sent_rcv_time,
4088 							    SCTP_RTT_FROM_DATA);
4091 						if (tp1->whoTo->rto_needed == 0) {
4092 							tp1->whoTo->rto_needed = 1;
4098 					 * CMT: CUCv2 algorithm. From the
4099 					 * cumack'd TSNs, for each TSN being
4100 					 * acked for the first time, set the
4101 					 * following variables for the
4102 					 * corresp destination.
4103 					 * new_pseudo_cumack will trigger a
4105 					 * find_(rtx_)pseudo_cumack will
4106 					 * trigger search for the next
4107 					 * expected (rtx-)pseudo-cumack.
4109 					tp1->whoTo->new_pseudo_cumack = 1;
4110 					tp1->whoTo->find_pseudo_cumack = 1;
4111 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4113 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4114 						/* sa_ignore NO_NULL_CHK */
4115 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4118 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4119 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4121 				if (tp1->rec.data.chunk_was_revoked) {
4122 					/* deflate the cwnd */
4123 					tp1->whoTo->cwnd -= tp1->book_size;
4124 					tp1->rec.data.chunk_was_revoked = 0;
/* NR-acked chunks were already removed from the stream accounting. */
4126 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4127 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4128 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4131 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* Stream now drained: a pending stream-reset can fire. */
4135 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4136 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4137 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4138 					asoc->trigger_reset = 1;
4140 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4142 					/* sa_ignore NO_NULL_CHK */
4143 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4144 					sctp_m_freem(tp1->data);
4147 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4148 					sctp_log_sack(asoc->last_acked_seq,
4153 					    SCTP_LOG_FREE_SENT);
4155 				asoc->sent_queue_cnt--;
4156 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4163 	/* sa_ignore NO_NULL_CHK */
/* Freed send-buffer space: wake any writer blocked on so_snd. */
4164 	if (stcb->sctp_socket) {
4165 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4169 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4170 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4171 			/* sa_ignore NO_NULL_CHK */
4172 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4174 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock order: drop TCB lock, take socket lock, re-take TCB lock;
 * hold a refcount so the assoc cannot be freed while unlocked. */
4175 		so = SCTP_INP_SO(stcb->sctp_ep);
4176 		atomic_add_int(&stcb->asoc.refcnt, 1);
4177 		SCTP_TCB_UNLOCK(stcb);
4178 		SCTP_SOCKET_LOCK(so, 1);
4179 		SCTP_TCB_LOCK(stcb);
4180 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4181 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4182 			/* assoc was freed while we were unlocked */
4183 			SCTP_SOCKET_UNLOCK(so, 1);
4187 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4188 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4189 		SCTP_SOCKET_UNLOCK(so, 1);
4192 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4193 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4197 	/* JRS - Use the congestion control given in the CC module */
4198 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4199 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4200 			if (net->net_ack2 > 0) {
4202 				 * Karn's rule applies to clearing error
4203 				 * count, this is optional.
4205 				net->error_count = 0;
4206 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4207 					/* addr came good */
4208 					net->dest_state |= SCTP_ADDR_REACHABLE;
4209 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4210 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4212 				if (net == stcb->asoc.primary_destination) {
4213 					if (stcb->asoc.alternate) {
4215 						 * release the alternate,
4218 						sctp_free_remote_addr(stcb->asoc.alternate);
4219 						stcb->asoc.alternate = NULL;
/* Leave Potentially-Failed state: restart heartbeat, inform CC. */
4222 				if (net->dest_state & SCTP_ADDR_PF) {
4223 					net->dest_state &= ~SCTP_ADDR_PF;
4224 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4225 					    stcb->sctp_ep, stcb, net,
4226 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4227 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4228 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4229 					/* Done with this net */
4232 				/* restore any doubled timers */
4233 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4234 				if (net->RTO < stcb->asoc.minrto) {
4235 					net->RTO = stcb->asoc.minrto;
4237 				if (net->RTO > stcb->asoc.maxrto) {
4238 					net->RTO = stcb->asoc.maxrto;
4242 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4244 	asoc->last_acked_seq = cumack;
4246 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4247 		/* nothing left in-flight */
4248 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4249 			net->flight_size = 0;
4250 			net->partial_bytes_acked = 0;
4252 		asoc->total_flight = 0;
4253 		asoc->total_flight_count = 0;
/* Recompute peer rwnd against what is still outstanding. */
4257 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4258 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4259 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4260 		/* SWS sender side engages */
4261 		asoc->peers_rwnd = 0;
4263 	if (asoc->peers_rwnd > old_rwnd) {
4264 		win_probe_recovery = 1;
4266 	/* Now assure a timer where data is queued at */
4269 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4270 		if (win_probe_recovery && (net->window_probe)) {
4271 			win_probe_recovered = 1;
4273 			 * Find first chunk that was used with window probe
4274 			 * and clear the sent
4276 			/* sa_ignore FREED_MEMORY */
4277 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4278 				if (tp1->window_probe) {
4279 					/* move back to data send queue */
4280 					sctp_window_probe_recovery(stcb, asoc, tp1);
4285 		if (net->flight_size) {
4287 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4288 			if (net->window_probe) {
4289 				net->window_probe = 0;
4292 			if (net->window_probe) {
4294 				 * In window probes we must assure a timer
4295 				 * is still running there
4297 				net->window_probe = 0;
4298 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4299 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4301 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4302 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4304 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight counters claim zero but queue non-empty: audit and, if the
 * audit confirms corruption, rebuild flight/retran counts from scratch. */
4309 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4310 	    (asoc->sent_queue_retran_cnt == 0) &&
4311 	    (win_probe_recovered == 0) &&
4314 		 * huh, this should not happen unless all packets are
4315 		 * PR-SCTP and marked to skip of course.
4317 		if (sctp_fs_audit(asoc)) {
4318 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4319 				net->flight_size = 0;
4321 			asoc->total_flight = 0;
4322 			asoc->total_flight_count = 0;
4323 			asoc->sent_queue_retran_cnt = 0;
4324 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4325 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4326 					sctp_flight_size_increase(tp1);
4327 					sctp_total_flight_increase(stcb, tp1);
4328 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4329 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4336 	/**********************************/
4337 	/* Now what about shutdown issues */
4338 	/**********************************/
4339 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4340 		/* nothing left on sendqueue.. consider done */
4342 		if ((asoc->stream_queue_cnt == 1) &&
4343 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4344 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4345 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4346 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
/* Shutdown requested while a user message is still incomplete: abort. */
4348 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4349 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4350 		    (asoc->stream_queue_cnt == 1) &&
4351 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4352 			struct mbuf *op_err;
4356 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4357 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4358 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* All data delivered while SHUTDOWN-PENDING: send SHUTDOWN. */
4361 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4362 		    (asoc->stream_queue_cnt == 0)) {
4363 			struct sctp_nets *netp;
4365 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4366 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4367 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4369 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4370 			sctp_stop_timers_for_shutdown(stcb);
4371 			if (asoc->alternate) {
4372 				netp = asoc->alternate;
4374 				netp = asoc->primary_destination;
4376 			sctp_send_shutdown(stcb, netp);
4377 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4378 			    stcb->sctp_ep, stcb, netp);
4379 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4380 			    stcb->sctp_ep, stcb, netp);
/* Peer initiated shutdown and we are drained: answer with SHUTDOWN-ACK. */
4381 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4382 		    (asoc->stream_queue_cnt == 0)) {
4383 			struct sctp_nets *netp;
4385 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4386 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4387 			sctp_stop_timers_for_shutdown(stcb);
4388 			if (asoc->alternate) {
4389 				netp = asoc->alternate;
4391 				netp = asoc->primary_destination;
4393 			sctp_send_shutdown_ack(stcb, netp);
4394 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4395 			    stcb->sctp_ep, stcb, netp);
4398 	/*********************************************/
4399 	/* Here we perform PR-SCTP procedures        */
4401 	/*********************************************/
4402 	/* C1. update advancedPeerAckPoint */
4403 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4404 		asoc->advanced_peer_ack_point = cumack;
4406 	/* PR-Sctp issues need to be addressed too */
4407 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4408 		struct sctp_tmit_chunk *lchk;
4409 		uint32_t old_adv_peer_ack_point;
4411 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4412 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4413 		/* C3. See if we need to send a Fwd-TSN */
4414 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4416 			 * ISSUE with ECN, see FWD-TSN processing.
4418 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4419 				send_forward_tsn(stcb, asoc);
4421 				/* try to FR fwd-tsn's that get lost too */
4422 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4423 					send_forward_tsn(stcb, asoc);
4428 			/* Assure a timer is up */
4429 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4430 			    stcb->sctp_ep, stcb, lchk->whoTo);
4433 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4434 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4436 		    stcb->asoc.peers_rwnd,
4437 		    stcb->asoc.total_flight,
4438 		    stcb->asoc.total_output_queue_size);
4443 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4444 struct sctp_tcb *stcb,
4445 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4446 int *abort_now, uint8_t flags,
4447 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4449 struct sctp_association *asoc;
4450 struct sctp_tmit_chunk *tp1, *tp2;
4451 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4452 uint16_t wake_him = 0;
4453 uint32_t send_s = 0;
4455 int accum_moved = 0;
4456 int will_exit_fast_recovery = 0;
4457 uint32_t a_rwnd, old_rwnd;
4458 int win_probe_recovery = 0;
4459 int win_probe_recovered = 0;
4460 struct sctp_nets *net = NULL;
4463 uint8_t reneged_all = 0;
4464 uint8_t cmt_dac_flag;
4467 * we take any chance we can to service our queues since we cannot
4468 * get awoken when the socket is read from :<
4471 * Now perform the actual SACK handling: 1) Verify that it is not an
4472 * old sack, if so discard. 2) If there is nothing left in the send
4473 * queue (cum-ack is equal to last acked) then you have a duplicate
4474 * too, update any rwnd change and verify no timers are running.
4475 * then return. 3) Process any new consequtive data i.e. cum-ack
4476 * moved process these first and note that it moved. 4) Process any
4477 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4478 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4479 * sync up flightsizes and things, stop all timers and also check
4480 * for shutdown_pending state. If so then go ahead and send off the
4481 * shutdown. If in shutdown recv, send off the shutdown-ack and
4482 * start that timer, Ret. 9) Strike any non-acked things and do FR
4483 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4484 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4485 * if in shutdown_recv state.
4487 SCTP_TCB_LOCK_ASSERT(stcb);
4489 this_sack_lowest_newack = 0;
4490 SCTP_STAT_INCR(sctps_slowpath_sack);
4492 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4493 #ifdef SCTP_ASOCLOG_OF_TSNS
4494 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4495 stcb->asoc.cumack_log_at++;
4496 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4497 stcb->asoc.cumack_log_at = 0;
4502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4503 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4504 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4507 old_rwnd = stcb->asoc.peers_rwnd;
4508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4509 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4510 stcb->asoc.overall_error_count,
4512 SCTP_FROM_SCTP_INDATA,
4515 stcb->asoc.overall_error_count = 0;
4517 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4518 sctp_log_sack(asoc->last_acked_seq,
4525 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4527 uint32_t *dupdata, dblock;
4529 for (i = 0; i < num_dup; i++) {
4530 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4531 sizeof(uint32_t), (uint8_t *)&dblock);
4532 if (dupdata == NULL) {
4535 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4539 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4540 tp1 = TAILQ_LAST(&asoc->sent_queue,
4541 sctpchunk_listhead);
4542 send_s = tp1->rec.data.tsn + 1;
4545 send_s = asoc->sending_seq;
4547 if (SCTP_TSN_GE(cum_ack, send_s)) {
4548 struct mbuf *op_err;
4549 char msg[SCTP_DIAG_INFO_LEN];
4552 * no way, we have not even sent this TSN out yet. Peer is
4553 * hopelessly messed up with us.
4555 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4558 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4559 tp1->rec.data.tsn, (void *)tp1);
4564 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4566 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4567 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4568 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4571 /**********************/
4572 /* 1) check the range */
4573 /**********************/
4574 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4575 /* acking something behind */
4579 /* update the Rwnd of the peer */
4580 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4581 TAILQ_EMPTY(&asoc->send_queue) &&
4582 (asoc->stream_queue_cnt == 0)) {
4583 /* nothing left on send/sent and strmq */
4584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4585 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4586 asoc->peers_rwnd, 0, 0, a_rwnd);
4588 asoc->peers_rwnd = a_rwnd;
4589 if (asoc->sent_queue_retran_cnt) {
4590 asoc->sent_queue_retran_cnt = 0;
4592 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4593 /* SWS sender side engages */
4594 asoc->peers_rwnd = 0;
4596 /* stop any timers */
4597 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4598 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4599 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4600 net->partial_bytes_acked = 0;
4601 net->flight_size = 0;
4603 asoc->total_flight = 0;
4604 asoc->total_flight_count = 0;
4608 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4609 * things. The total byte count acked is tracked in netAckSz AND
4610 * netAck2 is used to track the total bytes acked that are un-
4611 * amibguious and were never retransmitted. We track these on a per
4612 * destination address basis.
4614 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4615 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4616 /* Drag along the window_tsn for cwr's */
4617 net->cwr_window_tsn = cum_ack;
4619 net->prev_cwnd = net->cwnd;
4624 * CMT: Reset CUC and Fast recovery algo variables before
4627 net->new_pseudo_cumack = 0;
4628 net->will_exit_fast_recovery = 0;
4629 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4630 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4634 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4635 * to be greater than the cumack. Also reset saw_newack to 0
4638 net->saw_newack = 0;
4639 net->this_sack_highest_newack = last_tsn;
4641 /* process the new consecutive TSN first */
4642 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4643 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4644 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4646 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4648 * If it is less than ACKED, it is
4649 * now no-longer in flight. Higher
4650 * values may occur during marking
4652 if ((tp1->whoTo->dest_state &
4653 SCTP_ADDR_UNCONFIRMED) &&
4654 (tp1->snd_count < 2)) {
4656 * If there was no retran
4657 * and the address is
4658 * un-confirmed and we sent
4660 * sacked.. its confirmed,
4663 tp1->whoTo->dest_state &=
4664 ~SCTP_ADDR_UNCONFIRMED;
4666 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4667 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4668 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4669 tp1->whoTo->flight_size,
4671 (uint32_t)(uintptr_t)tp1->whoTo,
4674 sctp_flight_size_decrease(tp1);
4675 sctp_total_flight_decrease(stcb, tp1);
4676 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4677 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4681 tp1->whoTo->net_ack += tp1->send_size;
4683 /* CMT SFR and DAC algos */
4684 this_sack_lowest_newack = tp1->rec.data.tsn;
4685 tp1->whoTo->saw_newack = 1;
4687 if (tp1->snd_count < 2) {
4689 * True non-retransmitted
4692 tp1->whoTo->net_ack2 +=
4695 /* update RTO too? */
4699 sctp_calculate_rto(stcb,
4701 &tp1->sent_rcv_time,
4702 SCTP_RTT_FROM_DATA);
4705 if (tp1->whoTo->rto_needed == 0) {
4706 tp1->whoTo->rto_needed = 1;
4712 * CMT: CUCv2 algorithm. From the
4713 * cumack'd TSNs, for each TSN being
4714 * acked for the first time, set the
4715 * following variables for the
4716 * corresp destination.
4717 * new_pseudo_cumack will trigger a
4719 * find_(rtx_)pseudo_cumack will
4720 * trigger search for the next
4721 * expected (rtx-)pseudo-cumack.
4723 tp1->whoTo->new_pseudo_cumack = 1;
4724 tp1->whoTo->find_pseudo_cumack = 1;
4725 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4728 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4729 sctp_log_sack(asoc->last_acked_seq,
4734 SCTP_LOG_TSN_ACKED);
4736 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4737 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4740 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4741 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4742 #ifdef SCTP_AUDITING_ENABLED
4743 sctp_audit_log(0xB3,
4744 (asoc->sent_queue_retran_cnt & 0x000000ff));
4747 if (tp1->rec.data.chunk_was_revoked) {
4748 /* deflate the cwnd */
4749 tp1->whoTo->cwnd -= tp1->book_size;
4750 tp1->rec.data.chunk_was_revoked = 0;
4752 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4753 tp1->sent = SCTP_DATAGRAM_ACKED;
4760 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4761 /* always set this up to cum-ack */
4762 asoc->this_sack_highest_gap = last_tsn;
4764 if ((num_seg > 0) || (num_nr_seg > 0)) {
4767 * thisSackHighestGap will increase while handling NEW
4768 * segments this_sack_highest_newack will increase while
4769 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4770 * used for CMT DAC algo. saw_newack will also change.
4772 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4773 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4774 num_seg, num_nr_seg, &rto_ok)) {
4778 * validate the biggest_tsn_acked in the gap acks if strict
4779 * adherence is wanted.
4781 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4783 * peer is either confused or we are under attack.
4786 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4787 biggest_tsn_acked, send_s);
4791 /*******************************************/
4792 /* cancel ALL T3-send timer if accum moved */
4793 /*******************************************/
4794 if (asoc->sctp_cmt_on_off > 0) {
4795 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796 if (net->new_pseudo_cumack)
4797 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4799 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4804 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4805 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4806 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4810 /********************************************/
4811 /* drop the acked chunks from the sentqueue */
4812 /********************************************/
4813 asoc->last_acked_seq = cum_ack;
4815 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4816 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4819 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4820 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4821 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4824 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4828 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4829 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4830 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4831 asoc->trigger_reset = 1;
4833 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4834 if (PR_SCTP_ENABLED(tp1->flags)) {
4835 if (asoc->pr_sctp_cnt != 0)
4836 asoc->pr_sctp_cnt--;
4838 asoc->sent_queue_cnt--;
4840 /* sa_ignore NO_NULL_CHK */
4841 sctp_free_bufspace(stcb, asoc, tp1, 1);
4842 sctp_m_freem(tp1->data);
4844 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4845 asoc->sent_queue_cnt_removeable--;
4848 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4849 sctp_log_sack(asoc->last_acked_seq,
4854 SCTP_LOG_FREE_SENT);
4856 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4859 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4861 panic("Warning flight size is positive and should be 0");
4863 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4864 asoc->total_flight);
4866 asoc->total_flight = 0;
4869 /* sa_ignore NO_NULL_CHK */
4870 if ((wake_him) && (stcb->sctp_socket)) {
4871 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4875 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4876 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4877 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4879 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4880 so = SCTP_INP_SO(stcb->sctp_ep);
4881 atomic_add_int(&stcb->asoc.refcnt, 1);
4882 SCTP_TCB_UNLOCK(stcb);
4883 SCTP_SOCKET_LOCK(so, 1);
4884 SCTP_TCB_LOCK(stcb);
4885 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4886 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4887 /* assoc was freed while we were unlocked */
4888 SCTP_SOCKET_UNLOCK(so, 1);
4892 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4894 SCTP_SOCKET_UNLOCK(so, 1);
4897 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4898 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4902 if (asoc->fast_retran_loss_recovery && accum_moved) {
4903 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4904 /* Setup so we will exit RFC2582 fast recovery */
4905 will_exit_fast_recovery = 1;
4909 * Check for revoked fragments:
4911 * if Previous sack - Had no frags then we can't have any revoked if
4912 * Previous sack - Had frag's then - If we now have frags aka
4913 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4914 * some of them. else - The peer revoked all ACKED fragments, since
4915 * we had some before and now we have NONE.
4919 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4920 asoc->saw_sack_with_frags = 1;
4921 } else if (asoc->saw_sack_with_frags) {
4922 int cnt_revoked = 0;
4924 /* Peer revoked all dg's marked or acked */
4925 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4926 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4927 tp1->sent = SCTP_DATAGRAM_SENT;
4928 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4929 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4930 tp1->whoTo->flight_size,
4932 (uint32_t)(uintptr_t)tp1->whoTo,
4935 sctp_flight_size_increase(tp1);
4936 sctp_total_flight_increase(stcb, tp1);
4937 tp1->rec.data.chunk_was_revoked = 1;
4939 * To ensure that this increase in
4940 * flightsize, which is artificial, does not
4941 * throttle the sender, we also increase the
4942 * cwnd artificially.
4944 tp1->whoTo->cwnd += tp1->book_size;
4951 asoc->saw_sack_with_frags = 0;
4954 asoc->saw_sack_with_nr_frags = 1;
4956 asoc->saw_sack_with_nr_frags = 0;
4958 /* JRS - Use the congestion control given in the CC module */
4959 if (ecne_seen == 0) {
4960 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4961 if (net->net_ack2 > 0) {
4963 * Karn's rule applies to clearing error
4964 * count, this is optional.
4966 net->error_count = 0;
4967 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4968 /* addr came good */
4969 net->dest_state |= SCTP_ADDR_REACHABLE;
4970 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4971 0, (void *)net, SCTP_SO_NOT_LOCKED);
4974 if (net == stcb->asoc.primary_destination) {
4975 if (stcb->asoc.alternate) {
4977 * release the alternate,
4980 sctp_free_remote_addr(stcb->asoc.alternate);
4981 stcb->asoc.alternate = NULL;
4985 if (net->dest_state & SCTP_ADDR_PF) {
4986 net->dest_state &= ~SCTP_ADDR_PF;
4987 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4988 stcb->sctp_ep, stcb, net,
4989 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4990 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4991 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4992 /* Done with this net */
4995 /* restore any doubled timers */
4996 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4997 if (net->RTO < stcb->asoc.minrto) {
4998 net->RTO = stcb->asoc.minrto;
5000 if (net->RTO > stcb->asoc.maxrto) {
5001 net->RTO = stcb->asoc.maxrto;
5005 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5008 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5009 /* nothing left in-flight */
5010 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5011 /* stop all timers */
5012 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5014 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5015 net->flight_size = 0;
5016 net->partial_bytes_acked = 0;
5018 asoc->total_flight = 0;
5019 asoc->total_flight_count = 0;
5022 /**********************************/
5023 /* Now what about shutdown issues */
5024 /**********************************/
5025 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5026 /* nothing left on sendqueue.. consider done */
5027 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5028 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5029 asoc->peers_rwnd, 0, 0, a_rwnd);
5031 asoc->peers_rwnd = a_rwnd;
5032 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5033 /* SWS sender side engages */
5034 asoc->peers_rwnd = 0;
5037 if ((asoc->stream_queue_cnt == 1) &&
5038 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5039 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5040 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5041 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5043 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5044 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5045 (asoc->stream_queue_cnt == 1) &&
5046 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5047 struct mbuf *op_err;
5051 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5052 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5053 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5056 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5057 (asoc->stream_queue_cnt == 0)) {
5058 struct sctp_nets *netp;
5060 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5061 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5062 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5064 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5065 sctp_stop_timers_for_shutdown(stcb);
5066 if (asoc->alternate) {
5067 netp = asoc->alternate;
5069 netp = asoc->primary_destination;
5071 sctp_send_shutdown(stcb, netp);
5072 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5073 stcb->sctp_ep, stcb, netp);
5074 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5075 stcb->sctp_ep, stcb, netp);
5077 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5078 (asoc->stream_queue_cnt == 0)) {
5079 struct sctp_nets *netp;
5081 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5082 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5083 sctp_stop_timers_for_shutdown(stcb);
5084 if (asoc->alternate) {
5085 netp = asoc->alternate;
5087 netp = asoc->primary_destination;
5089 sctp_send_shutdown_ack(stcb, netp);
5090 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5091 stcb->sctp_ep, stcb, netp);
5096 * Now here we are going to recycle net_ack for a different use...
5099 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5104 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5105 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5106 * automatically ensure that.
5108 if ((asoc->sctp_cmt_on_off > 0) &&
5109 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5110 (cmt_dac_flag == 0)) {
5111 this_sack_lowest_newack = cum_ack;
5113 if ((num_seg > 0) || (num_nr_seg > 0)) {
5114 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5115 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5117 /* JRS - Use the congestion control given in the CC module */
5118 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5120 /* Now are we exiting loss recovery ? */
5121 if (will_exit_fast_recovery) {
5122 /* Ok, we must exit fast recovery */
5123 asoc->fast_retran_loss_recovery = 0;
5125 if ((asoc->sat_t3_loss_recovery) &&
5126 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5127 /* end satellite t3 loss recovery */
5128 asoc->sat_t3_loss_recovery = 0;
5133 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5134 if (net->will_exit_fast_recovery) {
5135 /* Ok, we must exit fast recovery */
5136 net->fast_retran_loss_recovery = 0;
5140 /* Adjust and set the new rwnd value */
5141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5142 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5143 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5145 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5146 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5147 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5148 /* SWS sender side engages */
5149 asoc->peers_rwnd = 0;
5151 if (asoc->peers_rwnd > old_rwnd) {
5152 win_probe_recovery = 1;
5156 * Now we must setup so we have a timer up for anyone with
5162 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5163 if (win_probe_recovery && (net->window_probe)) {
5164 win_probe_recovered = 1;
5166 * Find first chunk that was used with
5167 * window probe and clear the event. Put
5168 * it back into the send queue as if has
5171 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5172 if (tp1->window_probe) {
5173 sctp_window_probe_recovery(stcb, asoc, tp1);
5178 if (net->flight_size) {
5180 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5181 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5182 stcb->sctp_ep, stcb, net);
5184 if (net->window_probe) {
5185 net->window_probe = 0;
5188 if (net->window_probe) {
5190 * In window probes we must assure a timer
5191 * is still running there
5193 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5194 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5195 stcb->sctp_ep, stcb, net);
5198 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5199 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5201 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5206 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5207 (asoc->sent_queue_retran_cnt == 0) &&
5208 (win_probe_recovered == 0) &&
5211 * huh, this should not happen unless all packets are
5212 * PR-SCTP and marked to skip of course.
5214 if (sctp_fs_audit(asoc)) {
5215 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5216 net->flight_size = 0;
5218 asoc->total_flight = 0;
5219 asoc->total_flight_count = 0;
5220 asoc->sent_queue_retran_cnt = 0;
5221 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5222 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5223 sctp_flight_size_increase(tp1);
5224 sctp_total_flight_increase(stcb, tp1);
5225 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5226 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5233 /*********************************************/
5234 /* Here we perform PR-SCTP procedures */
5236 /*********************************************/
5237 /* C1. update advancedPeerAckPoint */
5238 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5239 asoc->advanced_peer_ack_point = cum_ack;
5241 /* C2. try to further move advancedPeerAckPoint ahead */
5242 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5243 struct sctp_tmit_chunk *lchk;
5244 uint32_t old_adv_peer_ack_point;
5246 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5247 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5248 /* C3. See if we need to send a Fwd-TSN */
5249 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5251 * ISSUE with ECN, see FWD-TSN processing.
5253 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5254 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5255 0xee, cum_ack, asoc->advanced_peer_ack_point,
5256 old_adv_peer_ack_point);
5258 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5259 send_forward_tsn(stcb, asoc);
5261 /* try to FR fwd-tsn's that get lost too */
5262 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5263 send_forward_tsn(stcb, asoc);
5268 /* Assure a timer is up */
5269 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5270 stcb->sctp_ep, stcb, lchk->whoTo);
5273 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5274 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5276 stcb->asoc.peers_rwnd,
5277 stcb->asoc.total_flight,
5278 stcb->asoc.total_output_queue_size);
5283 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5286 uint32_t cum_ack, a_rwnd;
5288 cum_ack = ntohl(cp->cumulative_tsn_ack);
5289 /* Arrange so a_rwnd does NOT change */
5290 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5292 /* Now call the express sack handling */
5293 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * Re-drive delivery on one inbound stream after a FORWARD-TSN advanced
 * strmin->last_mid_delivered.  Pass 1 hands every queued message whose
 * MID is at or before last_mid_delivered to the read queue; pass 2
 * resumes normal consecutive-MID delivery from there.  All helpers are
 * invoked with SCTP_READ_LOCK_HELD, so the caller must hold the INP
 * read lock.
 * NOTE(review): this extract has lines elided (braces/declarations);
 * comments below describe only what the visible code shows.
 */
5297 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5298 struct sctp_stream_in *strmin)
5300 struct sctp_queued_to_read *control, *ncontrol;
5301 struct sctp_association *asoc;
5303 int need_reasm_check = 0;
5306 mid = strmin->last_mid_delivered;
5308 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with MID <= last_mid_delivered is now deliverable. */
5311 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5312 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5313 /* this is deliverable now */
5314 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unfragmented: unlink from whichever stream queue holds it. */
5315 if (control->on_strm_q) {
5316 if (control->on_strm_q == SCTP_ON_ORDERED) {
5317 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5318 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5319 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5322 panic("strmin: %p ctl: %p unknown %d",
5323 strmin, control, control->on_strm_q);
5326 control->on_strm_q = 0;
5328 /* subtract pending on streams */
/* Guard against underflow of the aggregate on-stream byte count. */
5329 if (asoc->size_on_all_streams >= control->length) {
5330 asoc->size_on_all_streams -= control->length;
5333 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5335 asoc->size_on_all_streams = 0;
5338 sctp_ucount_decr(asoc->cnt_on_all_streams);
5339 /* deliver it to at least the delivery-q */
5340 if (stcb->sctp_socket) {
5341 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5342 sctp_add_to_readq(stcb->sctp_ep, stcb,
5344 &stcb->sctp_socket->so_rcv,
5345 1, SCTP_READ_LOCK_HELD,
5346 SCTP_SO_NOT_LOCKED);
5349 /* Its a fragmented message */
5350 if (control->first_frag_seen) {
5352 * Make it so this is next to
5353 * deliver, we restore later
5355 strmin->last_mid_delivered = control->mid - 1;
5356 need_reasm_check = 1;
5361 /* no more delivery now. */
/* A partially-reassembled message may now be deliverable. */
5365 if (need_reasm_check) {
5368 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5369 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5370 /* Restore the next to deliver unless we are ahead */
5371 strmin->last_mid_delivered = mid;
5374 /* Left the front Partial one on */
5377 need_reasm_check = 0;
5380 * now we must deliver things in queue the normal way if any are
/* Pass 2: continue in-order delivery for consecutive MIDs. */
5383 mid = strmin->last_mid_delivered + 1;
5384 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5385 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5386 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5387 /* this is deliverable now */
5388 if (control->on_strm_q) {
5389 if (control->on_strm_q == SCTP_ON_ORDERED) {
5390 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5391 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5392 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5395 panic("strmin: %p ctl: %p unknown %d",
5396 strmin, control, control->on_strm_q);
5399 control->on_strm_q = 0;
5401 /* subtract pending on streams */
5402 if (asoc->size_on_all_streams >= control->length) {
5403 asoc->size_on_all_streams -= control->length;
5406 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5408 asoc->size_on_all_streams = 0;
5411 sctp_ucount_decr(asoc->cnt_on_all_streams);
5412 /* deliver it to at least the delivery-q */
5413 strmin->last_mid_delivered = control->mid;
5414 if (stcb->sctp_socket) {
5415 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5416 sctp_add_to_readq(stcb->sctp_ep, stcb,
5418 &stcb->sctp_socket->so_rcv, 1,
5419 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance to the next expected MID and keep scanning. */
5422 mid = strmin->last_mid_delivered + 1;
5424 /* Its a fragmented message */
5425 if (control->first_frag_seen) {
5427 * Make it so this is next to
5430 strmin->last_mid_delivered = control->mid - 1;
5431 need_reasm_check = 1;
5439 if (need_reasm_check) {
5440 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * Drop the reassembly state for one stream/MID that a FORWARD-TSN has
 * skipped: purge queued fragments (for pre-I-DATA unordered data, only
 * those at or below cumtsn), fix the reasm-queue accounting, and free
 * the control block if nothing still references it.  Caller holds the
 * INP read lock (helpers are called with SCTP_READ_LOCK_HELD).
 * NOTE(review): lines are elided in this extract; comments reflect only
 * the visible code.
 */
5447 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5448 struct sctp_association *asoc,
5449 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5451 struct sctp_queued_to_read *control;
5452 struct sctp_stream_in *strm;
5453 struct sctp_tmit_chunk *chk, *nchk;
5454 int cnt_removed = 0;
5457 * For now large messages held on the stream reasm that are complete
5458 * will be tossed too. We could in theory do more work to spin
5459 * through and stop after dumping one msg aka seeing the start of a
5460 * new msg at the head, and call the delivery function... to see if
5461 * it can be delivered... But for now we just dump everything on the
5464 strm = &asoc->strmin[stream];
5465 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5466 if (control == NULL) {
/* Old-style unordered data beyond cumtsn is not being skipped. */
5470 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5473 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5474 /* Purge hanging chunks */
5475 if (!asoc->idata_supported && (ordered == 0)) {
5476 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5481 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* Keep the reasm-queue byte count from underflowing. */
5482 if (asoc->size_on_reasm_queue >= chk->send_size) {
5483 asoc->size_on_reasm_queue -= chk->send_size;
5486 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5488 asoc->size_on_reasm_queue = 0;
5491 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5493 sctp_m_freem(chk->data);
5496 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Fragments remain: this must be newer unordered data, re-anchor it. */
5498 if (!TAILQ_EMPTY(&control->reasm)) {
5499 /* This has to be old data, unordered */
5500 if (control->data) {
5501 sctp_m_freem(control->data);
5502 control->data = NULL;
5504 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5505 chk = TAILQ_FIRST(&control->reasm);
5506 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5507 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5508 sctp_add_chk_to_control(control, strm, stcb, asoc,
5509 chk, SCTP_READ_LOCK_HELD);
5511 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Fully flushed: unlink the control block from its stream queue. */
5514 if (control->on_strm_q == SCTP_ON_ORDERED) {
5515 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5516 if (asoc->size_on_all_streams >= control->length) {
5517 asoc->size_on_all_streams -= control->length;
5520 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5522 asoc->size_on_all_streams = 0;
5525 sctp_ucount_decr(asoc->cnt_on_all_streams);
5526 control->on_strm_q = 0;
5527 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5528 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5529 control->on_strm_q = 0;
5531 } else if (control->on_strm_q) {
5532 panic("strm: %p ctl: %p unknown %d",
5533 strm, control, control->on_strm_q);
5536 control->on_strm_q = 0;
/* Only free if it is not also sitting on the socket read queue. */
5537 if (control->on_read_q == 0) {
5538 sctp_free_remote_addr(control->whoFrom);
5539 if (control->data) {
5540 sctp_m_freem(control->data);
5541 control->data = NULL;
5543 sctp_free_a_readq(stcb, control);
/*
 * Receiver-side processing of a PR-SCTP FORWARD-TSN chunk: advance the
 * local cumulative TSN, fix the mapping arrays, flush skipped
 * reassembly state, abort any partial delivery that the skip cuts
 * through, and kick the per-stream reorder queues.  The numbered
 * section banners below mark each step.
 * NOTE(review): this extract has lines elided (braces, some locals);
 * comments reflect only the visible code.
 */
5548 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5549 struct sctp_forward_tsn_chunk *fwd,
5550 int *abort_flag, struct mbuf *m, int offset)
5552 /* The pr-sctp fwd tsn */
5554 * here we will perform all the data receiver side steps for
5555 * processing FwdTSN, as required in by pr-sctp draft:
5557 * Assume we get FwdTSN(x):
5559 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5560 * + others we have 3) examine and update re-ordering queue on
5561 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5562 * report where we are.
5564 struct sctp_association *asoc;
5565 uint32_t new_cum_tsn, gap;
5566 unsigned int i, fwd_sz, m_size;
5568 struct sctp_stream_in *strm;
5569 struct sctp_queued_to_read *control, *sv;
/* Sanity-check the wire-reported chunk length before touching fields. */
5572 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5573 SCTPDBG(SCTP_DEBUG_INDATA1,
5574 "Bad size too small/big fwd-tsn\n");
/* m_size = number of TSN slots in the mapping array (bytes * 8). */
5577 m_size = (stcb->asoc.mapping_array_size << 3);
5578 /*************************************************************/
5579 /* 1. Here we update local cumTSN and shift the bitmap array */
5580 /*************************************************************/
5581 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5583 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5584 /* Already got there ... */
5588 * now we know the new TSN is more advanced, let's find the actual
5591 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5592 asoc->cumulative_tsn = new_cum_tsn;
/* Jump lands beyond the current map: either an attack or a full reset. */
5593 if (gap >= m_size) {
5594 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5595 struct mbuf *op_err;
5596 char msg[SCTP_DIAG_INFO_LEN];
5599 * out of range (of single byte chunks in the rwnd I
5600 * give out). This must be an attacker.
5603 snprintf(msg, sizeof(msg),
5604 "New cum ack %8.8x too high, highest TSN %8.8x",
5605 new_cum_tsn, asoc->highest_tsn_inside_map);
5606 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5607 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5608 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5611 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
/* Rebase both mapping arrays just past the new cumulative TSN. */
5613 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5614 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5615 asoc->highest_tsn_inside_map = new_cum_tsn;
5617 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5618 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5620 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5621 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5624 SCTP_TCB_LOCK_ASSERT(stcb);
/* Mark every skipped TSN up to the gap as received (non-renegable). */
5625 for (i = 0; i <= gap; i++) {
5626 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5627 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5628 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5629 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5630 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5635 /*************************************************************/
5636 /* 2. Clear up re-assembly queue */
5637 /*************************************************************/
5639 /* This is now done as part of clearing up the stream/seq */
5640 if (asoc->idata_supported == 0) {
5643 /* Flush all the un-ordered data based on cum-tsn */
5644 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5645 for (sid = 0; sid < asoc->streamincnt; sid++) {
5646 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5648 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5650 /*******************************************************/
5651 /* 3. Update the PR-stream re-ordering queues and fix */
5652 /* delivery issues as needed. */
5653 /*******************************************************/
5654 fwd_sz -= sizeof(*fwd);
5657 unsigned int num_str;
5658 uint32_t mid, cur_mid;
5660 uint16_t ordered, flags;
5661 struct sctp_strseq *stseq, strseqbuf;
5662 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5664 offset += sizeof(*fwd);
5666 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* Entry format depends on whether I-DATA is negotiated. */
5667 if (asoc->idata_supported) {
5668 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5670 num_str = fwd_sz / sizeof(struct sctp_strseq);
5672 for (i = 0; i < num_str; i++) {
5673 if (asoc->idata_supported) {
5674 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5675 sizeof(struct sctp_strseq_mid),
5676 (uint8_t *)&strseqbuf_m);
5677 offset += sizeof(struct sctp_strseq_mid);
5678 if (stseq_m == NULL) {
5681 sid = ntohs(stseq_m->sid);
5682 mid = ntohl(stseq_m->mid);
5683 flags = ntohs(stseq_m->flags);
5684 if (flags & PR_SCTP_UNORDERED_FLAG) {
5690 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5691 sizeof(struct sctp_strseq),
5692 (uint8_t *)&strseqbuf);
5693 offset += sizeof(struct sctp_strseq);
5694 if (stseq == NULL) {
5697 sid = ntohs(stseq->sid);
5698 mid = (uint32_t)ntohs(stseq->ssn);
5706 * Ok we now look for the stream/seq on the read
5707 * queue where its not all delivered. If we find it
5708 * we transmute the read entry into a PDI_ABORTED.
5710 if (sid >= asoc->streamincnt) {
5711 /* screwed up streams, stop! */
5714 if ((asoc->str_of_pdapi == sid) &&
5715 (asoc->ssn_of_pdapi == mid)) {
5717 * If this is the one we were partially
5718 * delivering now then we no longer are.
5719 * Note this will change with the reassembly
5722 asoc->fragmented_delivery_inprogress = 0;
5724 strm = &asoc->strmin[sid];
/* Flush reassembly state for every MID being skipped on this stream. */
5725 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5726 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5728 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5729 if ((control->sinfo_stream == sid) &&
5730 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5731 str_seq = (sid << 16) | (0x0000ffff & mid);
5732 control->pdapi_aborted = 1;
/* Save/restore control_pdapi around the aborted-PD notification. */
5733 sv = stcb->asoc.control_pdapi;
5734 control->end_added = 1;
5735 if (control->on_strm_q == SCTP_ON_ORDERED) {
5736 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5737 if (asoc->size_on_all_streams >= control->length) {
5738 asoc->size_on_all_streams -= control->length;
5741 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5743 asoc->size_on_all_streams = 0;
5746 sctp_ucount_decr(asoc->cnt_on_all_streams);
5747 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5748 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5750 } else if (control->on_strm_q) {
5751 panic("strm: %p ctl: %p unknown %d",
5752 strm, control, control->on_strm_q);
5755 control->on_strm_q = 0;
5756 stcb->asoc.control_pdapi = control;
5757 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5759 SCTP_PARTIAL_DELIVERY_ABORTED,
5761 SCTP_SO_NOT_LOCKED);
5762 stcb->asoc.control_pdapi = sv;
5764 } else if ((control->sinfo_stream == sid) &&
5765 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5766 /* We are past our victim SSN */
5770 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5771 /* Update the sequence number */
5772 strm->last_mid_delivered = mid;
5774 /* now kick the stream the new way */
5775 /* sa_ignore NO_NULL_CHK */
5776 sctp_kick_prsctp_reorder_queue(stcb, strm);
5778 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5781 * Now slide thing forward.
5783 sctp_slide_mapping_arrays(stcb);