2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a SACK into the chunk queue (if I have data to
58 * send, that is) and will be sending it, for bundling.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: merges a reassembly chunk's mbuf data into an
 * existing read-queue control entry; defined later in this file.
 * lock_held tells the callee whether the caller already holds the
 * inp read lock (callers pass SCTP_READ_LOCK_NOT_HELD below).
 */
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int lock_held);
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
78 /* Calculate what the rwnd would be */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone as put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
90 if (stcb->sctp_socket == NULL) {
94 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->cnt_on_reasm_queue == 0 &&
100 asoc->cnt_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
108 * take out what has NOT been put on socket queue and we yet hold
111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 asoc->cnt_on_reasm_queue * MSIZE));
113 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 asoc->cnt_on_all_streams * MSIZE));
120 /* what is the overhead of all these rwnd's */
121 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
123 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 * even it is 0. SWS engaged
126 if (calc < stcb->asoc.my_rwnd_control_len) {
135 * Build out our readq entry based on the incoming packet.
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139 struct sctp_nets *net,
140 uint32_t tsn, uint32_t ppid,
141 uint32_t context, uint16_t sid,
142 uint32_t mid, uint8_t flags,
145 struct sctp_queued_to_read *read_queue_e = NULL;
147 sctp_alloc_a_readq(stcb, read_queue_e);
148 if (read_queue_e == NULL) {
151 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 read_queue_e->sinfo_stream = sid;
153 read_queue_e->sinfo_flags = (flags << 8);
154 read_queue_e->sinfo_ppid = ppid;
155 read_queue_e->sinfo_context = context;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->mid = mid;
160 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 TAILQ_INIT(&read_queue_e->reasm);
162 read_queue_e->whoFrom = net;
163 atomic_add_int(&net->ref_count, 1);
164 read_queue_e->data = dm;
165 read_queue_e->stcb = stcb;
166 read_queue_e->port_from = stcb->rport;
167 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168 read_queue_e->do_not_ref_stcb = 1;
171 return (read_queue_e);
175 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
177 struct sctp_extrcvinfo *seinfo;
178 struct sctp_sndrcvinfo *outinfo;
179 struct sctp_rcvinfo *rcvinfo;
180 struct sctp_nxtinfo *nxtinfo;
187 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
188 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
189 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
190 /* user does not want any ancillary data */
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
196 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
198 seinfo = (struct sctp_extrcvinfo *)sinfo;
199 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
200 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
202 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
206 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
207 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
209 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
212 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
218 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
223 SCTP_BUF_LEN(ret) = 0;
225 /* We need a CMSG header followed by the struct */
226 cmh = mtod(ret, struct cmsghdr *);
228 * Make sure that there is no un-initialized padding between the
229 * cmsg header and cmsg data and after the cmsg data.
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
233 cmh->cmsg_level = IPPROTO_SCTP;
234 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
235 cmh->cmsg_type = SCTP_RCVINFO;
236 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
237 rcvinfo->rcv_sid = sinfo->sinfo_stream;
238 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
239 rcvinfo->rcv_flags = sinfo->sinfo_flags;
240 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
241 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
242 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
243 rcvinfo->rcv_context = sinfo->sinfo_context;
244 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
245 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
246 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
249 cmh->cmsg_level = IPPROTO_SCTP;
250 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
251 cmh->cmsg_type = SCTP_NXTINFO;
252 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
253 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
254 nxtinfo->nxt_flags = 0;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
256 nxtinfo->nxt_flags |= SCTP_UNORDERED;
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
259 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
261 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
262 nxtinfo->nxt_flags |= SCTP_COMPLETE;
264 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
265 nxtinfo->nxt_length = seinfo->serinfo_next_length;
266 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
267 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
268 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
270 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
271 cmh->cmsg_level = IPPROTO_SCTP;
272 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
274 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
275 cmh->cmsg_type = SCTP_EXTRCV;
276 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
277 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
279 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
280 cmh->cmsg_type = SCTP_SNDRCV;
282 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
292 uint32_t gap, i, cumackp1;
294 int in_r = 0, in_nr = 0;
296 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
299 cumackp1 = asoc->cumulative_tsn + 1;
300 if (SCTP_TSN_GT(cumackp1, tsn)) {
302 * this tsn is behind the cum ack and thus we don't need to
303 * worry about it being moved from one to the other.
307 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
308 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
309 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
310 if ((in_r == 0) && (in_nr == 0)) {
312 panic("Things are really messed up now");
314 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
315 sctp_print_mapping_array(asoc);
319 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
321 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
322 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
323 asoc->highest_tsn_inside_nr_map = tsn;
325 if (tsn == asoc->highest_tsn_inside_map) {
326 /* We must back down to see what the new highest is */
327 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
328 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
329 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
330 asoc->highest_tsn_inside_map = i;
336 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
342 sctp_place_control_in_stream(struct sctp_stream_in *strm,
343 struct sctp_association *asoc,
344 struct sctp_queued_to_read *control)
346 struct sctp_queued_to_read *at;
347 struct sctp_readhead *q;
348 uint8_t flags, unordered;
350 flags = (control->sinfo_flags >> 8);
351 unordered = flags & SCTP_DATA_UNORDERED;
353 q = &strm->uno_inqueue;
354 if (asoc->idata_supported == 0) {
355 if (!TAILQ_EMPTY(q)) {
357 * Only one stream can be here in old style
362 TAILQ_INSERT_TAIL(q, control, next_instrm);
363 control->on_strm_q = SCTP_ON_UNORDERED;
369 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
370 control->end_added = 1;
371 control->first_frag_seen = 1;
372 control->last_frag_seen = 1;
374 if (TAILQ_EMPTY(q)) {
376 TAILQ_INSERT_HEAD(q, control, next_instrm);
378 control->on_strm_q = SCTP_ON_UNORDERED;
380 control->on_strm_q = SCTP_ON_ORDERED;
384 TAILQ_FOREACH(at, q, next_instrm) {
385 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
387 * one in queue is bigger than the new one,
388 * insert before this one
390 TAILQ_INSERT_BEFORE(at, control, next_instrm);
392 control->on_strm_q = SCTP_ON_UNORDERED;
394 control->on_strm_q = SCTP_ON_ORDERED;
397 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
399 * Gak, He sent me a duplicate msg id
400 * number?? return -1 to abort.
404 if (TAILQ_NEXT(at, next_instrm) == NULL) {
406 * We are at the end, insert it
409 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
410 sctp_log_strm_del(control, at,
411 SCTP_STR_LOG_FROM_INSERT_TL);
413 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
415 control->on_strm_q = SCTP_ON_UNORDERED;
417 control->on_strm_q = SCTP_ON_ORDERED;
428 sctp_abort_in_reasm(struct sctp_tcb *stcb,
429 struct sctp_queued_to_read *control,
430 struct sctp_tmit_chunk *chk,
431 int *abort_flag, int opspot)
433 char msg[SCTP_DIAG_INFO_LEN];
436 if (stcb->asoc.idata_supported) {
437 SCTP_SNPRINTF(msg, sizeof(msg),
438 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
440 control->fsn_included,
443 chk->rec.data.fsn, chk->rec.data.mid);
445 SCTP_SNPRINTF(msg, sizeof(msg),
446 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
448 control->fsn_included,
452 (uint16_t)chk->rec.data.mid);
454 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
455 sctp_m_freem(chk->data);
457 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
458 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
459 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
464 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
467 * The control could not be placed and must be cleaned.
469 struct sctp_tmit_chunk *chk, *nchk;
471 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
472 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
474 sctp_m_freem(chk->data);
476 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
478 sctp_free_remote_addr(control->whoFrom);
480 sctp_m_freem(control->data);
481 control->data = NULL;
483 sctp_free_a_readq(stcb, control);
/*
 * sctp_queue_data_to_stream(): deliver a control entry straight to the
 * socket receive buffer when its MID is the next expected for the
 * stream (then drain any now-in-order, non-fragmented successors), or
 * insert it into the ordered stream queue via
 * sctp_place_control_in_stream().  Duplicate/behind MIDs abort the
 * association through *abort_flag.
 * NOTE(review): this listing is elided (embedded original line numbers,
 * missing lines); comments below annotate only what is visible.
 */
487 * Queue the chunk either right into the socket buffer if it is the next one
488 * to go OR put it in the correct place in the delivery queue. If we do
489 * append to the so_buf, keep doing so until we are out of order as
490 * long as the control's entered are non-fragmented.
493 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
494 struct sctp_association *asoc,
495 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
498 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
499 * all the data in one stream this could happen quite rapidly. One
500 * could use the TSN to keep track of things, but this scheme breaks
501 * down in the other type of stream usage that could occur. Send a
502 * single msg to stream 0, send 4Billion messages to stream 1, now
503 * send a message to stream 0. You have a situation where the TSN
504 * has wrapped but not in the stream. Is this worth worrying about
505 * or should we just change our queue sort at the bottom to be by
508 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
509 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
510 * assignment this could happen... and I don't see how this would be
511 * a violation. So for now I am undecided an will leave the sort by
512 * SSN alone. Maybe a hybred approach is the answer
515 struct sctp_queued_to_read *at;
519 struct sctp_stream_in *strm;
520 char msg[SCTP_DIAG_INFO_LEN];
522 strm = &asoc->strmin[control->sinfo_stream];
523 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
524 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* MID behind what we already delivered: protocol violation, abort. */
526 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
527 /* The incoming sseq is behind where we last delivered? */
528 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
529 strm->last_mid_delivered, control->mid);
531 * throw it in the stream so it gets cleaned up in
532 * association destruction
534 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
535 if (asoc->idata_supported) {
536 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
537 strm->last_mid_delivered, control->sinfo_tsn,
538 control->sinfo_stream, control->mid);
540 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
541 (uint16_t)strm->last_mid_delivered,
543 control->sinfo_stream,
544 (uint16_t)control->mid);
546 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
547 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
548 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
554 asoc->size_on_all_streams += control->length;
555 sctp_ucount_incr(asoc->cnt_on_all_streams);
556 nxt_todel = strm->last_mid_delivered + 1;
/* Exactly the next expected MID: deliver immediately to so_rcv. */
557 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
558 /* can be delivered right away? */
559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
560 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
562 /* EY it wont be queued if it could be delivered directly */
564 if (asoc->size_on_all_streams >= control->length) {
565 asoc->size_on_all_streams -= control->length;
568 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
570 asoc->size_on_all_streams = 0;
573 sctp_ucount_decr(asoc->cnt_on_all_streams);
574 strm->last_mid_delivered++;
575 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
576 sctp_add_to_readq(stcb->sctp_ep, stcb,
578 &stcb->sctp_socket->so_rcv, 1,
579 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain queued successors that are now in order and unfragmented. */
580 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
582 nxt_todel = strm->last_mid_delivered + 1;
583 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
584 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
585 if (control->on_strm_q == SCTP_ON_ORDERED) {
586 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
587 if (asoc->size_on_all_streams >= control->length) {
588 asoc->size_on_all_streams -= control->length;
591 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
593 asoc->size_on_all_streams = 0;
596 sctp_ucount_decr(asoc->cnt_on_all_streams);
599 panic("Huh control: %p is on_strm_q: %d",
600 control, control->on_strm_q);
603 control->on_strm_q = 0;
604 strm->last_mid_delivered++;
606 * We ignore the return of deliver_data here
607 * since we always can hold the chunk on the
608 * d-queue. And we have a finite number that
609 * can be delivered from the strq.
611 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
612 sctp_log_strm_del(control, NULL,
613 SCTP_STR_LOG_FROM_IMMED_DEL);
615 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
616 sctp_add_to_readq(stcb->sctp_ep, stcb,
618 &stcb->sctp_socket->so_rcv, 1,
619 SCTP_READ_LOCK_NOT_HELD,
622 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
630 * Ok, we did not deliver this guy, find the correct place
631 * to put it on the queue.
/* Non-zero return from place_control means duplicate MID: abort. */
633 if (sctp_place_control_in_stream(strm, asoc, control)) {
634 SCTP_SNPRINTF(msg, sizeof(msg),
635 "Queue to str MID: %u duplicate", control->mid);
636 sctp_clean_up_control(stcb, control);
637 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
638 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
639 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
647 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
649 struct mbuf *m, *prev = NULL;
650 struct sctp_tcb *stcb;
652 stcb = control->stcb;
653 control->held_length = 0;
657 if (SCTP_BUF_LEN(m) == 0) {
658 /* Skip mbufs with NO length */
661 control->data = sctp_m_free(m);
664 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
665 m = SCTP_BUF_NEXT(prev);
668 control->tail_mbuf = prev;
673 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
674 if (control->on_read_q) {
676 * On read queue so we must increment the SB stuff,
677 * we assume caller has done any locks of SB.
679 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
681 m = SCTP_BUF_NEXT(m);
684 control->tail_mbuf = prev;
689 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
691 struct mbuf *prev = NULL;
692 struct sctp_tcb *stcb;
694 stcb = control->stcb;
697 panic("Control broken");
702 if (control->tail_mbuf == NULL) {
704 sctp_m_freem(control->data);
706 sctp_setup_tail_pointer(control);
709 control->tail_mbuf->m_next = m;
711 if (SCTP_BUF_LEN(m) == 0) {
712 /* Skip mbufs with NO length */
715 control->tail_mbuf->m_next = sctp_m_free(m);
716 m = control->tail_mbuf->m_next;
718 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
719 m = SCTP_BUF_NEXT(prev);
722 control->tail_mbuf = prev;
727 if (control->on_read_q) {
729 * On read queue so we must increment the SB stuff,
730 * we assume caller has done any locks of SB.
732 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
734 *added += SCTP_BUF_LEN(m);
735 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
736 m = SCTP_BUF_NEXT(m);
739 control->tail_mbuf = prev;
744 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
746 memset(nc, 0, sizeof(struct sctp_queued_to_read));
747 nc->sinfo_stream = control->sinfo_stream;
748 nc->mid = control->mid;
749 TAILQ_INIT(&nc->reasm);
750 nc->top_fsn = control->top_fsn;
751 nc->mid = control->mid;
752 nc->sinfo_flags = control->sinfo_flags;
753 nc->sinfo_ppid = control->sinfo_ppid;
754 nc->sinfo_context = control->sinfo_context;
755 nc->fsn_included = 0xffffffff;
756 nc->sinfo_tsn = control->sinfo_tsn;
757 nc->sinfo_cumtsn = control->sinfo_cumtsn;
758 nc->sinfo_assoc_id = control->sinfo_assoc_id;
759 nc->whoFrom = control->whoFrom;
760 atomic_add_int(&nc->whoFrom->ref_count, 1);
761 nc->stcb = control->stcb;
762 nc->port_from = control->port_from;
763 nc->do_not_ref_stcb = control->do_not_ref_stcb;
767 sctp_reset_a_control(struct sctp_queued_to_read *control,
768 struct sctp_inpcb *inp, uint32_t tsn)
770 control->fsn_included = tsn;
771 if (control->on_read_q) {
773 * We have to purge it from there, hopefully this will work
776 TAILQ_REMOVE(&inp->read_queue, control, next);
777 control->on_read_q = 0;
/*
 * sctp_handle_old_unordered_data(): reassembly for old-style (non
 * I-DATA) unordered chunks, which all share MID 0 on one control.
 * Collapses consecutive FSNs onto the control, hands completed data to
 * the read queue, and when a message completes with fragments of a
 * following message still queued, moves the leftovers to a fresh
 * control entry (nc).
 * NOTE(review): this listing is elided (embedded original line numbers,
 * missing lines); comments below annotate only what is visible.
 */
782 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
783 struct sctp_association *asoc,
784 struct sctp_stream_in *strm,
785 struct sctp_queued_to_read *control,
787 int inp_read_lock_held)
790 * Special handling for the old un-ordered data chunk. All the
791 * chunks/TSN's go to mid 0. So we have to do the old style watching
792 * to see if we have it all. If you return one, no other control
793 * entries on the un-ordered queue will be looked at. In theory
794 * there should be no others entries in reality, unless the guy is
795 * sending both unordered NDATA and unordered DATA...
797 struct sctp_tmit_chunk *chk, *lchk, *tchk;
799 struct sctp_queued_to_read *nc;
802 if (control->first_frag_seen == 0) {
803 /* Nothing we can do, we have not seen the first piece yet */
806 /* Collapse any we can */
809 fsn = control->fsn_included + 1;
810 /* Now what can we add? */
811 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
/* Next consecutive fragment: merge it into the control. */
812 if (chk->rec.data.fsn == fsn) {
814 sctp_alloc_a_readq(stcb, nc);
818 memset(nc, 0, sizeof(struct sctp_queued_to_read));
819 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
820 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
824 if (control->end_added) {
826 if (!TAILQ_EMPTY(&control->reasm)) {
828 * Ok we have to move anything left
829 * on the control queue to a new
832 sctp_build_readq_entry_from_ctl(nc, control);
833 tchk = TAILQ_FIRST(&control->reasm);
/* Leftover starts a new message: seed nc with its first fragment. */
834 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
835 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
836 if (asoc->size_on_reasm_queue >= tchk->send_size) {
837 asoc->size_on_reasm_queue -= tchk->send_size;
840 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
842 asoc->size_on_reasm_queue = 0;
845 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
846 nc->first_frag_seen = 1;
847 nc->fsn_included = tchk->rec.data.fsn;
848 nc->data = tchk->data;
849 nc->sinfo_ppid = tchk->rec.data.ppid;
850 nc->sinfo_tsn = tchk->rec.data.tsn;
851 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
853 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
854 sctp_setup_tail_pointer(nc);
855 tchk = TAILQ_FIRST(&control->reasm);
857 /* Spin the rest onto the queue */
859 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
860 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
861 tchk = TAILQ_FIRST(&control->reasm);
864 * Now lets add it to the queue
865 * after removing control
867 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
868 nc->on_strm_q = SCTP_ON_UNORDERED;
869 if (control->on_strm_q) {
870 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
871 control->on_strm_q = 0;
874 if (control->pdapi_started) {
875 strm->pd_api_started = 0;
876 control->pdapi_started = 0;
878 if (control->on_strm_q) {
879 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
880 control->on_strm_q = 0;
881 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
883 if (control->on_read_q == 0) {
884 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
885 &stcb->sctp_socket->so_rcv, control->end_added,
886 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
888 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
889 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
891 * Switch to the new guy and
897 if (nc->on_strm_q == 0) {
898 sctp_free_a_readq(stcb, nc);
903 sctp_free_a_readq(stcb, nc);
/* Partial-delivery API: wake reader / start PD when past pd_point. */
910 if (cnt_added && strm->pd_api_started) {
911 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
913 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
914 strm->pd_api_started = 1;
915 control->pdapi_started = 1;
916 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
917 &stcb->sctp_socket->so_rcv, control->end_added,
918 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
919 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * sctp_inject_old_unordered_data(): insert one old-style unordered
 * fragment into the shared control entry, keeping the reassembly list
 * sorted by FSN.  A FIRST_FRAG fragment may replace the currently
 * included data (swapping the smaller-FSN first in); duplicates or
 * impossible combinations abort via sctp_abort_in_reasm().
 * NOTE(review): this listing is elided (embedded original line numbers,
 * missing lines); comments below annotate only what is visible.
 */
927 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
928 struct sctp_association *asoc,
929 struct sctp_queued_to_read *control,
930 struct sctp_tmit_chunk *chk,
933 struct sctp_tmit_chunk *at;
937 * Here we need to place the chunk into the control structure sorted
938 * in the correct order.
940 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
941 /* Its the very first one. */
942 SCTPDBG(SCTP_DEBUG_XXX,
943 "chunk is a first fsn: %u becomes fsn_included\n",
945 at = TAILQ_FIRST(&control->reasm);
946 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
948 * The first chunk in the reassembly is a smaller
949 * TSN than this one, even though this has a first,
950 * it must be from a subsequent msg.
954 if (control->first_frag_seen) {
956 * In old un-ordered we can reassembly on one
957 * control multiple messages. As long as the next
958 * FIRST is greater then the old first (TSN i.e. FSN
964 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
966 * Easy way the start of a new guy beyond
971 if ((chk->rec.data.fsn == control->fsn_included) ||
972 (control->pdapi_started)) {
974 * Ok this should not happen, if it does we
975 * started the pd-api on the higher TSN
976 * (since the equals part is a TSN failure
979 * We are completly hosed in that case since
980 * I have no way to recover. This really
981 * will only happen if we can get more TSN's
982 * higher before the pd-api-point.
984 sctp_abort_in_reasm(stcb, control, chk,
986 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
991 * Ok we have two firsts and the one we just got is
992 * smaller than the one we previously placed.. yuck!
993 * We must swap them out.
/* Swap the new (smaller-FSN) first with control's current data. */
996 tdata = control->data;
997 control->data = chk->data;
999 /* Save the lengths */
1000 chk->send_size = control->length;
1001 /* Recompute length of control and tail pointer */
1002 sctp_setup_tail_pointer(control);
1003 /* Fix the FSN included */
1004 tmp = control->fsn_included;
1005 control->fsn_included = chk->rec.data.fsn;
1006 chk->rec.data.fsn = tmp;
1007 /* Fix the TSN included */
1008 tmp = control->sinfo_tsn;
1009 control->sinfo_tsn = chk->rec.data.tsn;
1010 chk->rec.data.tsn = tmp;
1011 /* Fix the PPID included */
1012 tmp = control->sinfo_ppid;
1013 control->sinfo_ppid = chk->rec.data.ppid;
1014 chk->rec.data.ppid = tmp;
1015 /* Fix tail pointer */
/* First fragment ever seen: take its data/metadata directly. */
1018 control->first_frag_seen = 1;
1019 control->fsn_included = chk->rec.data.fsn;
1020 control->top_fsn = chk->rec.data.fsn;
1021 control->sinfo_tsn = chk->rec.data.tsn;
1022 control->sinfo_ppid = chk->rec.data.ppid;
1023 control->data = chk->data;
1024 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1026 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1027 sctp_setup_tail_pointer(control);
/* Not a first fragment: FSN-sorted insert into the reasm list. */
1032 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1033 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1035 * This one in queue is bigger than the new one,
1036 * insert the new one before at.
1038 asoc->size_on_reasm_queue += chk->send_size;
1039 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1041 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1043 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1045 * They sent a duplicate fsn number. This really
1046 * should not happen since the FSN is a TSN and it
1047 * should have been dropped earlier.
1049 sctp_abort_in_reasm(stcb, control, chk,
1051 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1056 if (inserted == 0) {
1057 /* Its at the end */
1058 asoc->size_on_reasm_queue += chk->send_size;
1059 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1060 control->top_fsn = chk->rec.data.fsn;
1061 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1066 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1067 struct sctp_stream_in *strm, int inp_read_lock_held)
1070 * Given a stream, strm, see if any of the SSN's on it that are
1071 * fragmented are ready to deliver. If so go ahead and place them on
1072 * the read queue. In so placing if we have hit the end, then we
1073 * need to remove them from the stream's queue.
1075 struct sctp_queued_to_read *control, *nctl = NULL;
1076 uint32_t next_to_del;
1080 if (stcb->sctp_socket) {
1081 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1082 stcb->sctp_ep->partial_delivery_point);
1084 pd_point = stcb->sctp_ep->partial_delivery_point;
1086 control = TAILQ_FIRST(&strm->uno_inqueue);
1088 if ((control != NULL) &&
1089 (asoc->idata_supported == 0)) {
1090 /* Special handling needed for "old" data format */
1091 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1095 if (strm->pd_api_started) {
1096 /* Can't add more */
1100 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1101 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1102 nctl = TAILQ_NEXT(control, next_instrm);
1103 if (control->end_added) {
1104 /* We just put the last bit on */
1105 if (control->on_strm_q) {
1107 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1108 panic("Huh control: %p on_q: %d -- not unordered?",
1109 control, control->on_strm_q);
1112 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1113 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1114 control->on_strm_q = 0;
1116 if (control->on_read_q == 0) {
1117 sctp_add_to_readq(stcb->sctp_ep, stcb,
1119 &stcb->sctp_socket->so_rcv, control->end_added,
1120 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1123 /* Can we do a PD-API for this un-ordered guy? */
1124 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1125 strm->pd_api_started = 1;
1126 control->pdapi_started = 1;
1127 sctp_add_to_readq(stcb->sctp_ep, stcb,
1129 &stcb->sctp_socket->so_rcv, control->end_added,
1130 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1138 control = TAILQ_FIRST(&strm->inqueue);
1139 if (strm->pd_api_started) {
1140 /* Can't add more */
1143 if (control == NULL) {
1146 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1148 * Ok the guy at the top was being partially delivered
1149 * completed, so we remove it. Note the pd_api flag was
1150 * taken off when the chunk was merged on in
1151 * sctp_queue_data_for_reasm below.
1153 nctl = TAILQ_NEXT(control, next_instrm);
1154 SCTPDBG(SCTP_DEBUG_XXX,
1155 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1156 control, control->end_added, control->mid,
1157 control->top_fsn, control->fsn_included,
1158 strm->last_mid_delivered);
1159 if (control->end_added) {
1160 if (control->on_strm_q) {
1162 if (control->on_strm_q != SCTP_ON_ORDERED) {
1163 panic("Huh control: %p on_q: %d -- not ordered?",
1164 control, control->on_strm_q);
1167 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1168 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1169 if (asoc->size_on_all_streams >= control->length) {
1170 asoc->size_on_all_streams -= control->length;
1173 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1175 asoc->size_on_all_streams = 0;
1178 sctp_ucount_decr(asoc->cnt_on_all_streams);
1179 control->on_strm_q = 0;
1181 if (strm->pd_api_started && control->pdapi_started) {
1182 control->pdapi_started = 0;
1183 strm->pd_api_started = 0;
1185 if (control->on_read_q == 0) {
1186 sctp_add_to_readq(stcb->sctp_ep, stcb,
1188 &stcb->sctp_socket->so_rcv, control->end_added,
1189 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1194 if (strm->pd_api_started) {
1196 * Can't add more must have gotten an un-ordered above being
1197 * partially delivered.
1202 next_to_del = strm->last_mid_delivered + 1;
1204 SCTPDBG(SCTP_DEBUG_XXX,
1205 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1206 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1208 nctl = TAILQ_NEXT(control, next_instrm);
1209 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1210 (control->first_frag_seen)) {
1213 /* Ok we can deliver it onto the stream. */
1214 if (control->end_added) {
1215 /* We are done with it afterwards */
1216 if (control->on_strm_q) {
1218 if (control->on_strm_q != SCTP_ON_ORDERED) {
1219 panic("Huh control: %p on_q: %d -- not ordered?",
1220 control, control->on_strm_q);
1223 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1224 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1225 if (asoc->size_on_all_streams >= control->length) {
1226 asoc->size_on_all_streams -= control->length;
1229 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1231 asoc->size_on_all_streams = 0;
1234 sctp_ucount_decr(asoc->cnt_on_all_streams);
1235 control->on_strm_q = 0;
1239 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1241 * A singleton now slipping through - mark
1242 * it non-revokable too
1244 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1245 } else if (control->end_added == 0) {
1247 * Check if we can defer adding until its
1250 if ((control->length < pd_point) || (strm->pd_api_started)) {
1252 * Don't need it or cannot add more
1253 * (one being delivered that way)
1258 done = (control->end_added) && (control->last_frag_seen);
1259 if (control->on_read_q == 0) {
1261 if (asoc->size_on_all_streams >= control->length) {
1262 asoc->size_on_all_streams -= control->length;
1265 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1267 asoc->size_on_all_streams = 0;
1270 strm->pd_api_started = 1;
1271 control->pdapi_started = 1;
1273 sctp_add_to_readq(stcb->sctp_ep, stcb,
1275 &stcb->sctp_socket->so_rcv, control->end_added,
1276 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1278 strm->last_mid_delivered = next_to_del;
/*
 * sctp_add_chk_to_control():
 * Merge the data mbuf chain of tmit-chunk `chk` onto the partially
 * reassembled message `control`, update reassembly accounting, and free
 * the chunk wrapper.  `hold_rlock` tells us whether the caller already
 * holds the INP read lock.
 * NOTE(review): this extract has gaps — the return type, local variable
 * declarations (e.g. `added`) and several closing braces are missing
 * from view, so the comments below describe only what is visible.
 */
1291 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1292 struct sctp_stream_in *strm,
1293 struct sctp_tcb *stcb, struct sctp_association *asoc,
1294 struct sctp_tmit_chunk *chk, int hold_rlock)
1297 * Given a control and a chunk, merge the data from the chk onto the
1298 * control and free up the chunk resources.
/*
 * If the message is already visible on the read queue and the caller
 * does not hold the INP read lock, take it before touching the mbuf
 * chain — a PD-API reader may be consuming this control concurrently.
 */
1303 if (control->on_read_q && (hold_rlock == 0)) {
1305 * Its being pd-api'd so we must do some locks.
1307 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First data for this control: adopt the chunk's mbuf chain outright;
 * otherwise append it at the cached tail pointer. */
1310 if (control->data == NULL) {
1311 control->data = chk->data;
1312 sctp_setup_tail_pointer(control);
1314 sctp_add_to_tail_pointer(control, chk->data, &added);
/* The chunk's FSN is now the highest fragment merged into control. */
1316 control->fsn_included = chk->rec.data.fsn;
/* Chunk leaves the reassembly queue: adjust byte and count accounting,
 * and mark its TSN non-renegable. */
1317 asoc->size_on_reasm_queue -= chk->send_size;
1318 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1319 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* A FIRST fragment supplies the message-level TSN/PPID metadata. */
1321 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1322 control->first_frag_seen = 1;
1323 control->sinfo_tsn = chk->rec.data.tsn;
1324 control->sinfo_ppid = chk->rec.data.ppid;
/* A LAST fragment completes the message: clear any PD-API state and
 * unlink the control from whichever stream queue it sits on. */
1326 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1328 if ((control->on_strm_q) && (control->on_read_q)) {
1329 if (control->pdapi_started) {
1330 control->pdapi_started = 0;
1331 strm->pd_api_started = 0;
1333 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1335 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1336 control->on_strm_q = 0;
1337 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1339 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1341 * Don't need to decrement
1342 * size_on_all_streams, since control is on
1345 sctp_ucount_decr(asoc->cnt_on_all_streams);
1346 control->on_strm_q = 0;
/* Any other on_strm_q value is a corrupted state — panic. */
1348 } else if (control->on_strm_q) {
1349 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1350 control->on_strm_q);
1354 control->end_added = 1;
1355 control->last_frag_seen = 1;
/* Presumably paired with the conditional lock above — the guard
 * condition is not visible in this extract; TODO confirm. */
1358 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* Data ownership moved to control; release the chunk wrapper. */
1360 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * sctp_queue_data_for_reasm():
 * Insert chunk `chk` into the reassembly state for message `control`
 * (FSN-ordered), validate fragment sequencing, then merge any now
 * in-sequence fragments into the control and deliver/wake as possible.
 * On protocol violations it aborts the association via
 * sctp_abort_in_reasm() and sets *abort_flag.
 * NOTE(review): this extract has gaps — several locals (`inserted`,
 * `next_fsn`, `lenadded`), else-arms, returns and closing braces are
 * missing from view; comments describe only the visible logic.
 */
1365 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1366 * queue, see if anthing can be delivered. If so pull it off (or as much as
1367 * we can. If we run out of space then we must dump what we can and set the
1368 * appropriate flag to say we queued what we could.
1371 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1372 struct sctp_queued_to_read *control,
1373 struct sctp_tmit_chunk *chk,
1374 int created_control,
1375 int *abort_flag, uint32_t tsn)
1378 struct sctp_tmit_chunk *at, *nat;
1379 struct sctp_stream_in *strm;
1380 int do_wakeup, unordered;
1383 strm = &asoc->strmin[control->sinfo_stream];
1385 * For old un-ordered data chunks.
/* sinfo_flags carries the chunk flags in its upper byte. */
1387 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1392 /* Must be added to the stream-in queue */
/* A freshly created control must be placed onto the stream's in-queue;
 * a duplicate SSN at this point is a protocol violation. */
1393 if (created_control) {
1394 if (unordered == 0) {
1395 sctp_ucount_incr(asoc->cnt_on_all_streams);
1397 if (sctp_place_control_in_stream(strm, asoc, control)) {
1398 /* Duplicate SSN? */
1399 sctp_abort_in_reasm(stcb, control, chk,
1401 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1402 sctp_clean_up_control(stcb, control);
/* Old DATA only: a fragment at cum-ack+1 must carry the B (first) bit,
 * otherwise the start of the message can never arrive — abort. */
1405 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1407 * Ok we created this control and now lets validate
1408 * that its legal i.e. there is a B bit set, if not
1409 * and we have up to the cum-ack then its invalid.
1411 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1412 sctp_abort_in_reasm(stcb, control, chk,
1414 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old (pre-I-DATA) unordered fragments take a separate injection path. */
1419 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1420 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1424 * Ok we must queue the chunk into the reasembly portion: o if its
1425 * the first it goes to the control mbuf. o if its not first but the
1426 * next in sequence it goes to the control, and each succeeding one
1427 * in order also goes. o if its not in order we place it on the list
/* FIRST fragment: becomes the control's head data; a second FIRST for
 * the same message is a sender error and aborts. */
1430 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1431 /* Its the very first one. */
1432 SCTPDBG(SCTP_DEBUG_XXX,
1433 "chunk is a first fsn: %u becomes fsn_included\n",
1435 if (control->first_frag_seen) {
1437 * Error on senders part, they either sent us two
1438 * data chunks with FIRST, or they sent two
1439 * un-ordered chunks that were fragmented at the
1440 * same time in the same stream.
1442 sctp_abort_in_reasm(stcb, control, chk,
1444 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* Adopt this fragment's data/metadata into the control directly. */
1447 control->first_frag_seen = 1;
1448 control->sinfo_ppid = chk->rec.data.ppid;
1449 control->sinfo_tsn = chk->rec.data.tsn;
1450 control->fsn_included = chk->rec.data.fsn;
1451 control->data = chk->data;
1452 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1454 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1455 sctp_setup_tail_pointer(control);
1456 asoc->size_on_all_streams += control->length;
1458 /* Place the chunk in our list */
/* Middle/last fragment. While the LAST fragment has not yet been seen,
 * top_fsn may still grow; once LAST arrives nothing may exceed it. */
1461 if (control->last_frag_seen == 0) {
1462 /* Still willing to raise highest FSN seen */
1463 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1464 SCTPDBG(SCTP_DEBUG_XXX,
1465 "We have a new top_fsn: %u\n",
1467 control->top_fsn = chk->rec.data.fsn;
1469 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1470 SCTPDBG(SCTP_DEBUG_XXX,
1471 "The last fsn is now in place fsn: %u\n",
1473 control->last_frag_seen = 1;
/* A LAST fragment below an already-seen higher FSN is impossible. */
1474 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1475 SCTPDBG(SCTP_DEBUG_XXX,
1476 "New fsn: %u is not at top_fsn: %u -- abort\n",
1479 sctp_abort_in_reasm(stcb, control, chk,
1481 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1485 if (asoc->idata_supported || control->first_frag_seen) {
1487 * For IDATA we always check since we know
1488 * that the first fragment is 0. For old
1489 * DATA we have to receive the first before
1490 * we know the first FSN (which is the TSN).
/* Fragment at or below what was already merged → duplicate, abort. */
1492 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1494 * We have already delivered up to
1497 sctp_abort_in_reasm(stcb, control, chk,
1499 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
/* After LAST was seen: a second LAST fragment is a violation. */
1504 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1505 /* Second last? huh? */
1506 SCTPDBG(SCTP_DEBUG_XXX,
1507 "Duplicate last fsn: %u (top: %u) -- abort\n",
1508 chk->rec.data.fsn, control->top_fsn);
1509 sctp_abort_in_reasm(stcb, control,
1511 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1514 if (asoc->idata_supported || control->first_frag_seen) {
1516 * For IDATA we always check since we know
1517 * that the first fragment is 0. For old
1518 * DATA we have to receive the first before
1519 * we know the first FSN (which is the TSN).
1522 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1524 * We have already delivered up to
1527 SCTPDBG(SCTP_DEBUG_XXX,
1528 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1529 chk->rec.data.fsn, control->fsn_included);
1530 sctp_abort_in_reasm(stcb, control, chk,
1532 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1537 * validate not beyond top FSN if we have seen last
/* Nothing may arrive beyond the LAST fragment's FSN. */
1540 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1541 SCTPDBG(SCTP_DEBUG_XXX,
1542 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1545 sctp_abort_in_reasm(stcb, control, chk,
1547 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1552 * If we reach here, we need to place the new chunk in the
1553 * reassembly for this control.
1555 SCTPDBG(SCTP_DEBUG_XXX,
1556 "chunk is a not first fsn: %u needs to be inserted\n",
/* Walk the FSN-sorted reasm list; insert before the first larger FSN,
 * abort on an exact duplicate FSN. */
1558 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1559 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1561 * This one in queue is bigger than the new
1562 * one, insert the new one before at.
1564 SCTPDBG(SCTP_DEBUG_XXX,
1565 "Insert it before fsn: %u\n",
1567 asoc->size_on_reasm_queue += chk->send_size;
1568 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1569 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1572 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1574 * Gak, He sent me a duplicate str seq
1578 * foo bar, I guess I will just free this
1579 * new guy, should we abort too? FIX ME
1580 * MAYBE? Or it COULD be that the SSN's have
1581 * wrapped. Maybe I should compare to TSN
1582 * somehow... sigh for now just blow away
1585 SCTPDBG(SCTP_DEBUG_XXX,
1586 "Duplicate to fsn: %u -- abort\n",
1588 sctp_abort_in_reasm(stcb, control,
1590 SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
/* No larger FSN found in the loop → append at the tail. */
1594 if (inserted == 0) {
1595 /* Goes on the end */
1596 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1598 asoc->size_on_reasm_queue += chk->send_size;
1599 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1600 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1604 * Ok lets see if we can suck any up into the control structure that
1605 * are in seq if it makes sense.
1609 * If the first fragment has not been seen there is no sense in
/* Merge pass: pull consecutive fragments (fsn_included+1, +2, ...) off
 * the reasm list into the control until a gap is hit. */
1612 if (control->first_frag_seen) {
1613 next_fsn = control->fsn_included + 1;
1614 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1615 if (at->rec.data.fsn == next_fsn) {
1616 /* We can add this one now to the control */
1617 SCTPDBG(SCTP_DEBUG_XXX,
1618 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1621 next_fsn, control->fsn_included);
1622 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1623 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1624 if (control->on_read_q) {
1628 * We only add to the
1629 * size-on-all-streams if its not on
1630 * the read q. The read q flag will
1631 * cause a sballoc so its accounted
1634 asoc->size_on_all_streams += lenadded;
/* Message completed while a PD-API delivery was active → close it. */
1637 if (control->end_added && control->pdapi_started) {
1638 if (strm->pd_api_started) {
1639 strm->pd_api_started = 0;
1640 control->pdapi_started = 0;
1642 if (control->on_read_q == 0) {
1643 sctp_add_to_readq(stcb->sctp_ep, stcb,
1645 &stcb->sctp_socket->so_rcv, control->end_added,
1646 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1656 /* Need to wakeup the reader */
1657 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * sctp_find_reasm_entry():
 * Locate the in-progress reassembly control for message id `mid` on
 * stream `strm`.  The ordered in-queue is searched first; for I-DATA
 * the unordered queue is searched by MID as well, while for old DATA
 * the head of the unordered queue is taken.
 * NOTE(review): the return statements, loop-exit breaks and closing
 * braces are missing from this extract; the `ordered` parameter's use
 * is not visible here — TODO confirm against the full source.
 */
1661 static struct sctp_queued_to_read *
1662 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1664 struct sctp_queued_to_read *control;
/* Scan the ordered stream in-queue for a matching MID (SCTP_MID_EQ
 * handles 16-bit SSN vs 32-bit MID comparison per idata_supported). */
1667 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1668 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* I-DATA: unordered messages also carry MIDs, search by MID. */
1673 if (idata_supported) {
1674 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1675 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Old DATA: unordered fragments have no MID; use the queue head. */
1680 control = TAILQ_FIRST(&strm->uno_inqueue);
/*
 * sctp_process_a_data_chunk():
 * Receive-side handling of one DATA or I-DATA chunk: parse the header,
 * filter duplicates via the mapping arrays, validate stream id and
 * fragment state, then either express-deliver a complete in-order
 * message to the socket buffer, queue a complete message for stream
 * reordering, or hand a fragment to sctp_queue_data_for_reasm().
 * Finally it updates cum-ack/statistics and services any pending
 * stream-reset backlog.
 * NOTE(review): this extract has gaps — the return type, many locals
 * (chk_flags, sid, ppid, clen, the_len, dmbuf, l_len, lat, mm, ordered),
 * several else-arms, `return` statements, `goto` targets and the
 * `finish_express_del:` label itself are not visible.  Comments below
 * describe only what the visible lines establish.
 */
1687 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1688 struct mbuf **m, int offset, int chk_length,
1689 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1690 int *break_flag, int last_chunk, uint8_t chk_type)
1692 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1693 uint32_t tsn, fsn, gap, mid;
1696 int need_reasm_check = 0;
1698 struct mbuf *op_err;
1699 char msg[SCTP_DIAG_INFO_LEN];
1700 struct sctp_queued_to_read *control, *ncontrol;
1703 struct sctp_stream_reset_list *liste;
1706 int created_control = 0;
/* Parse the chunk header.  I-DATA carries a 32-bit MID and, on the
 * FIRST fragment only, a PPID (otherwise that field is the FSN). */
1708 if (chk_type == SCTP_IDATA) {
1709 struct sctp_idata_chunk *chunk, chunk_buf;
1711 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1712 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1713 chk_flags = chunk->ch.chunk_flags;
1714 clen = sizeof(struct sctp_idata_chunk);
1715 tsn = ntohl(chunk->dp.tsn);
1716 sid = ntohs(chunk->dp.sid);
1717 mid = ntohl(chunk->dp.mid);
1718 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1720 ppid = chunk->dp.ppid_fsn.ppid;
1722 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1723 ppid = 0xffffffff; /* Use as an invalid value. */
/* Old DATA: 16-bit SSN widened into the 32-bit mid variable. */
1726 struct sctp_data_chunk *chunk, chunk_buf;
1728 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1729 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1730 chk_flags = chunk->ch.chunk_flags;
1731 clen = sizeof(struct sctp_data_chunk);
1732 tsn = ntohl(chunk->dp.tsn);
1733 sid = ntohs(chunk->dp.sid);
1734 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1736 ppid = chunk->dp.ppid;
/* A chunk with no user data is a protocol violation → ABORT assoc. */
1738 if ((size_t)chk_length == clen) {
1740 * Need to send an abort since we had a empty data chunk.
1742 op_err = sctp_generate_no_user_data_cause(tsn);
1743 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1744 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Honor the I-bit: sender requests an immediate SACK. */
1748 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1749 asoc->send_sack = 1;
1751 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1752 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1753 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1758 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
/* TSN at or below cum-ack → duplicate: record it for the next SACK. */
1759 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1760 /* It is a duplicate */
1761 SCTP_STAT_INCR(sctps_recvdupdata);
1762 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1763 /* Record a dup for the next outbound sack */
1764 asoc->dup_tsns[asoc->numduptsns] = tsn;
1767 asoc->send_sack = 1;
1770 /* Calculate the number of TSN's between the base and this TSN */
1771 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1772 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1773 /* Can't hold the bit in the mapping at max array, toss it */
/* Grow the mapping array if this TSN's bit lies past its current end. */
1776 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1777 SCTP_TCB_LOCK_ASSERT(stcb);
1778 if (sctp_expand_mapping_array(asoc, gap)) {
1779 /* Can't expand, drop it */
1783 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1786 /* See if we have received this one already */
1787 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1788 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1789 SCTP_STAT_INCR(sctps_recvdupdata);
1790 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1791 /* Record a dup for the next outbound sack */
1792 asoc->dup_tsns[asoc->numduptsns] = tsn;
1795 asoc->send_sack = 1;
1799 * Check to see about the GONE flag, duplicates would cause a sack
1800 * to be sent up above
/* Socket is gone/closing: no receiver remains, ABORT the association. */
1802 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1803 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1804 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1806 * wait a minute, this guy is gone, there is no longer a
1807 * receiver. Send peer an ABORT!
1809 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1810 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1815 * Now before going further we see if there is room. If NOT then we
1816 * MAY let one through only IF this TSN is the one we are waiting
1817 * for on a partial delivery API.
1820 /* Is the stream valid? */
/* Invalid stream id: queue an INVALID_STREAM error to the peer, but
 * still mark the TSN received (nr-map) so it is acked, not retried. */
1821 if (sid >= asoc->streamincnt) {
1822 struct sctp_error_invalid_stream *cause;
1824 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1825 0, M_NOWAIT, 1, MT_DATA);
1826 if (op_err != NULL) {
1827 /* add some space up front so prepend will work well */
1828 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1829 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1831 * Error causes are just param's and this one has
1832 * two back to back phdr, one with the error type
1833 * and size, the other with the streamid and a rsvd
1835 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1836 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1837 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1838 cause->stream_id = htons(sid);
1839 cause->reserved = htons(0);
1840 sctp_queue_op_err(stcb, op_err);
1842 SCTP_STAT_INCR(sctps_badsid);
1843 SCTP_TCB_LOCK_ASSERT(stcb);
1844 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1845 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1846 asoc->highest_tsn_inside_nr_map = tsn;
1848 if (tsn == (asoc->cumulative_tsn + 1)) {
1849 /* Update cum-ack */
1850 asoc->cumulative_tsn = tsn;
1855 * If its a fragmented message, lets see if we can find the control
1856 * on the reassembly queues.
/* I-DATA sanity: only the FIRST fragment may have FSN 0 (the guard's
 * fsn==0 test is in a missing line of this extract — TODO confirm). */
1858 if ((chk_type == SCTP_IDATA) &&
1859 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1862 * The first *must* be fsn 0, and other (middle/end) pieces
1863 * can *not* be fsn 0. XXX: This can happen in case of a
1864 * wrap around. Ignore is for now.
1866 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1869 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1870 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1871 chk_flags, control);
/* Fragmented chunk: validate any found control matches this fragment's
 * MID and ordered/unordered property; mismatches ABORT the assoc. */
1872 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1873 /* See if we can find the re-assembly entity */
1874 if (control != NULL) {
1875 /* We found something, does it belong? */
1876 if (ordered && (mid != control->mid)) {
1877 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1879 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1880 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1881 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1885 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1887 * We can't have a switched order with an
1890 SCTP_SNPRINTF(msg, sizeof(msg),
1891 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1895 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1897 * We can't have a switched unordered with a
1900 SCTP_SNPRINTF(msg, sizeof(msg),
1901 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1908 * Its a complete segment. Lets validate we don't have a
1909 * re-assembly going on with the same Stream/Seq (for
1910 * ordered) or in the same Stream for unordered.
/* Complete (unfragmented) chunk colliding with an in-progress
 * reassembly on the same stream/MID → protocol violation. */
1912 if (control != NULL) {
1913 if (ordered || asoc->idata_supported) {
1914 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1916 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1919 if ((tsn == control->fsn_included + 1) &&
1920 (control->end_added == 0)) {
1921 SCTP_SNPRINTF(msg, sizeof(msg),
1922 "Illegal message sequence, missing end for MID: %8.8x",
1923 control->fsn_included);
1931 /* now do the tests */
/* Resource limits: too many queued chunks or a closed rwnd.  Nudge the
 * reader awake, then decide whether to drop this chunk. */
1932 if (((asoc->cnt_on_all_streams +
1933 asoc->cnt_on_reasm_queue +
1934 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1935 (((int)asoc->my_rwnd) <= 0)) {
1937 * When we have NO room in the rwnd we check to make sure
1938 * the reader is doing its job...
1940 if (stcb->sctp_socket->so_rcv.sb_cc) {
1941 /* some to read, wake-up */
1942 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1944 /* now is it in the mapping array of what we have accepted? */
1945 if (chk_type == SCTP_DATA) {
1946 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1947 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1948 /* Nope not in the valid range dump it */
1950 sctp_set_rwnd(stcb, asoc);
1951 if ((asoc->cnt_on_all_streams +
1952 asoc->cnt_on_reasm_queue +
1953 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1954 SCTP_STAT_INCR(sctps_datadropchklmt);
1956 SCTP_STAT_INCR(sctps_datadroprwnd);
1962 if (control == NULL) {
1965 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
/* Optional per-association TSN trace ring buffer (debug builds). */
1970 #ifdef SCTP_ASOCLOG_OF_TSNS
1971 SCTP_TCB_LOCK_ASSERT(stcb);
1972 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1973 asoc->tsn_in_at = 0;
1974 asoc->tsn_in_wrapped = 1;
1976 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1977 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1978 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1979 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1980 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1981 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1982 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1983 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1987 * Before we continue lets validate that we are not being fooled by
1988 * an evil attacker. We can only have Nk chunks based on our TSN
1989 * spread allowed by the mapping array N * 8 bits, so there is no
1990 * way our stream sequence numbers could have wrapped. We of course
1991 * only validate the FIRST fragment so the bit must be set.
/* Ordered FIRST fragment whose MID is at/behind the last delivered MID
 * (with no reset pending) can only be a broken or evil peer → ABORT. */
1993 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1994 (TAILQ_EMPTY(&asoc->resetHead)) &&
1995 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1996 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1997 /* The incoming sseq is behind where we last delivered? */
1998 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1999 mid, asoc->strmin[sid].last_mid_delivered);
2001 if (asoc->idata_supported) {
2002 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2003 asoc->strmin[sid].last_mid_delivered,
2008 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2009 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2014 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2015 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2016 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Payload length = chunk length minus the fixed header size. */
2020 if (chk_type == SCTP_IDATA) {
2021 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2023 the_len = (chk_length - sizeof(struct sctp_data_chunk));
/* Obtain the payload mbufs: copy if more chunks follow in this packet,
 * otherwise steal the packet's mbuf chain and trim it to the payload. */
2025 if (last_chunk == 0) {
2026 if (chk_type == SCTP_IDATA) {
2027 dmbuf = SCTP_M_COPYM(*m,
2028 (offset + sizeof(struct sctp_idata_chunk)),
2031 dmbuf = SCTP_M_COPYM(*m,
2032 (offset + sizeof(struct sctp_data_chunk)),
2035 #ifdef SCTP_MBUF_LOGGING
2036 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2037 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2041 /* We can steal the last chunk */
2045 /* lop off the top part */
2046 if (chk_type == SCTP_IDATA) {
2047 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2049 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2051 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2052 l_len = SCTP_BUF_LEN(dmbuf);
2055 * need to count up the size hopefully does not hit
2061 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2062 l_len += SCTP_BUF_LEN(lat);
2065 if (l_len > the_len) {
2066 /* Trim the end round bytes off too */
2067 m_adj(dmbuf, -(l_len - the_len));
2070 if (dmbuf == NULL) {
2071 SCTP_STAT_INCR(sctps_nomem);
2075 * Now no matter what, we need a control, get one if we don't have
2076 * one (we may have gotten it above when we found the message was
2079 if (control == NULL) {
2080 sctp_alloc_a_readq(stcb, control);
2081 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2086 if (control == NULL) {
2087 SCTP_STAT_INCR(sctps_nomem);
/* Unfragmented message: the control is immediately complete; compute
 * its length and cache the tail mbuf for later appends. */
2090 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2093 control->data = dmbuf;
2094 control->tail_mbuf = NULL;
2095 for (mm = control->data; mm; mm = mm->m_next) {
2096 control->length += SCTP_BUF_LEN(mm);
2097 if (SCTP_BUF_NEXT(mm) == NULL) {
2098 control->tail_mbuf = mm;
2101 control->end_added = 1;
2102 control->last_frag_seen = 1;
2103 control->first_frag_seen = 1;
2104 control->fsn_included = fsn;
2105 control->top_fsn = fsn;
2107 created_control = 1;
2109 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2110 chk_flags, ordered, mid, control);
/* Express-delivery fast path: complete message, no reset pending, and
 * it is next to deliver with an empty stream queue. */
2111 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2112 TAILQ_EMPTY(&asoc->resetHead) &&
2114 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2115 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2116 /* Candidate for express delivery */
2118 * Its not fragmented, No PD-API is up, Nothing in the
2119 * delivery queue, Its un-ordered OR ordered and the next to
2120 * deliver AND nothing else is stuck on the stream queue,
2121 * And there is room for it in the socket buffer. Lets just
2122 * stuff it up the buffer....
2124 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2125 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2126 asoc->highest_tsn_inside_nr_map = tsn;
2128 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2131 sctp_add_to_readq(stcb->sctp_ep, stcb,
2132 control, &stcb->sctp_socket->so_rcv,
2133 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2135 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2136 /* for ordered, bump what we delivered */
2137 asoc->strmin[sid].last_mid_delivered++;
2139 SCTP_STAT_INCR(sctps_recvexpress);
2140 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2141 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2142 SCTP_STR_LOG_FROM_EXPRS_DEL);
2145 goto finish_express_del;
2148 /* Now will we need a chunk too? */
/* Fragment path: wrap the payload in a tmit-chunk for the reasm queue. */
2149 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2150 sctp_alloc_a_chunk(stcb, chk);
2152 /* No memory so we drop the chunk */
2153 SCTP_STAT_INCR(sctps_nomem);
2154 if (last_chunk == 0) {
2155 /* we copied it, free the copy */
2156 sctp_m_freem(dmbuf);
2160 chk->rec.data.tsn = tsn;
2161 chk->no_fr_allowed = 0;
2162 chk->rec.data.fsn = fsn;
2163 chk->rec.data.mid = mid;
2164 chk->rec.data.sid = sid;
2165 chk->rec.data.ppid = ppid;
2166 chk->rec.data.context = stcb->asoc.context;
2167 chk->rec.data.doing_fast_retransmit = 0;
2168 chk->rec.data.rcv_flags = chk_flags;
2170 chk->send_size = the_len;
2172 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
/* The chunk holds a reference on its destination net. */
2175 atomic_add_int(&net->ref_count, 1);
2178 /* Set the appropriate TSN mark */
/* With drain disabled, all TSNs go into the non-renegable map;
 * otherwise into the renegable map. */
2179 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2180 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2181 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2182 asoc->highest_tsn_inside_nr_map = tsn;
2185 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2186 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2187 asoc->highest_tsn_inside_map = tsn;
2190 /* Now is it complete (i.e. not fragmented)? */
2191 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2193 * Special check for when streams are resetting. We could be
2194 * more smart about this and check the actual stream to see
2195 * if it is not being reset.. that way we would not create a
2196 * HOLB when amongst streams being reset and those not being
/* Complete message past a pending stream reset point: park it on the
 * TSN-ordered pending_reply_queue until the reset is serviced. */
2200 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2201 SCTP_TSN_GT(tsn, liste->tsn)) {
2203 * yep its past where we need to reset... go ahead
2206 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2208 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2210 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2211 unsigned char inserted = 0;
2213 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2214 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2219 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2224 if (inserted == 0) {
2226 * must be put at end, use prevP
2227 * (all setup from loop) to setup
2230 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2233 goto finish_express_del;
/* No reset pending: unordered goes straight to the read queue,
 * ordered goes through per-stream reordering. */
2235 if (chk_flags & SCTP_DATA_UNORDERED) {
2236 /* queue directly into socket buffer */
2237 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2239 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2240 sctp_add_to_readq(stcb->sctp_ep, stcb,
2242 &stcb->sctp_socket->so_rcv, 1,
2243 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2246 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2248 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2256 goto finish_express_del;
2258 /* If we reach here its a reassembly */
2259 need_reasm_check = 1;
2260 SCTPDBG(SCTP_DEBUG_XXX,
2261 "Queue data to stream for reasm control: %p MID: %u\n",
2263 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2266 * the assoc is now gone and chk was put onto the reasm
2267 * queue, which has all been freed.
/* Common tail (the `finish_express_del:` label itself is in a line
 * missing from this extract): advance cum-ack, bump counters, run any
 * deferred reassembly check, then service pending stream resets. */
2275 /* Here we tidy up things */
2276 if (tsn == (asoc->cumulative_tsn + 1)) {
2277 /* Update cum-ack */
2278 asoc->cumulative_tsn = tsn;
2284 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2286 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2288 SCTP_STAT_INCR(sctps_recvdata);
2289 /* Set it present please */
2290 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2291 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2294 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2295 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2297 if (need_reasm_check) {
2298 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2299 need_reasm_check = 0;
2301 /* check the special flag for stream resets */
2302 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2303 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2305 * we have finished working through the backlogged TSN's now
2306 * time to reset streams. 1: call reset function. 2: free
2307 * pending_reply space 3: distribute any chunks in
2308 * pending_reply_queue.
2310 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2311 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2312 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2313 SCTP_FREE(liste, SCTP_M_STRESET);
2314 /* sa_ignore FREED_MEMORY */
2315 liste = TAILQ_FIRST(&asoc->resetHead);
2316 if (TAILQ_EMPTY(&asoc->resetHead)) {
2317 /* All can be removed */
/* No more resets queued: drain everything that was parked. */
2318 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2319 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2320 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2324 if (need_reasm_check) {
2325 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2326 need_reasm_check = 0;
/* Another reset still pending: only release parked controls whose
 * TSN is at or below the next reset point. */
2330 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2331 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2335 * if control->sinfo_tsn is <= liste->tsn we
2336 * can process it which is the NOT of
2337 * control->sinfo_tsn > liste->tsn
2339 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2340 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2344 if (need_reasm_check) {
2345 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2346 need_reasm_check = 0;
/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits in the byte v
 * counting up from the least significant bit (0..8).  It is used when
 * advancing the cumulative-TSN cursor across a mapping-array byte that is
 * only partially filled (i.e. contains a 0 bit): the table value is how
 * many TSNs at the start of that byte are already acked.
 */
2354 static const int8_t sctp_map_lookup_tab[256] = {
2355 0, 1, 0, 2, 0, 1, 0, 3,
2356 0, 1, 0, 2, 0, 1, 0, 4,
2357 0, 1, 0, 2, 0, 1, 0, 3,
2358 0, 1, 0, 2, 0, 1, 0, 5,
2359 0, 1, 0, 2, 0, 1, 0, 3,
2360 0, 1, 0, 2, 0, 1, 0, 4,
2361 0, 1, 0, 2, 0, 1, 0, 3,
2362 0, 1, 0, 2, 0, 1, 0, 6,
2363 0, 1, 0, 2, 0, 1, 0, 3,
2364 0, 1, 0, 2, 0, 1, 0, 4,
2365 0, 1, 0, 2, 0, 1, 0, 3,
2366 0, 1, 0, 2, 0, 1, 0, 5,
2367 0, 1, 0, 2, 0, 1, 0, 3,
2368 0, 1, 0, 2, 0, 1, 0, 4,
2369 0, 1, 0, 2, 0, 1, 0, 3,
2370 0, 1, 0, 2, 0, 1, 0, 7,
2371 0, 1, 0, 2, 0, 1, 0, 3,
2372 0, 1, 0, 2, 0, 1, 0, 4,
2373 0, 1, 0, 2, 0, 1, 0, 3,
2374 0, 1, 0, 2, 0, 1, 0, 5,
2375 0, 1, 0, 2, 0, 1, 0, 3,
2376 0, 1, 0, 2, 0, 1, 0, 4,
2377 0, 1, 0, 2, 0, 1, 0, 3,
2378 0, 1, 0, 2, 0, 1, 0, 6,
2379 0, 1, 0, 2, 0, 1, 0, 3,
2380 0, 1, 0, 2, 0, 1, 0, 4,
2381 0, 1, 0, 2, 0, 1, 0, 3,
2382 0, 1, 0, 2, 0, 1, 0, 5,
2383 0, 1, 0, 2, 0, 1, 0, 3,
2384 0, 1, 0, 2, 0, 1, 0, 4,
2385 0, 1, 0, 2, 0, 1, 0, 3,
2386 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Advance the receive-side mapping arrays.  The cumulative TSN is first
 * recomputed from the byte-wise OR of mapping_array and nr_mapping_array
 * (with fragmented messages, neither array alone gives the true cum-ack
 * picture).  If the cursor ('at') has advanced by at least one full byte,
 * the arrays are either cleared outright (cum-ack equals the highest TSN
 * seen) or their contents are slid down and mapping_array_base_tsn is
 * advanced accordingly; highest_tsn_inside_(nr_)map are adjusted to stay
 * consistent with the new base.  Invariant violations (cum-ack beyond the
 * highest mapped TSN, an impossible slide range, or a would-be buffer
 * overrun) either panic or log-and-repair depending on compile-time
 * options whose preprocessor guards are not visible in this view.
 * All transitions are optionally traced via sctp_log_map().
 */
2391 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2394 * Now we also need to check the mapping array in a couple of ways.
2395 * 1) Did we move the cum-ack point?
2397 * When you first glance at this you might think that all entries
2398 * that make up the position of the cum-ack would be in the
2399 * nr-mapping array only.. i.e. things up to the cum-ack are always
2400 * deliverable. Thats true with one exception, when its a fragmented
2401 * message we may not deliver the data until some threshold (or all
2402 * of it) is in place. So we must OR the nr_mapping_array and
2403 * mapping_array to get a true picture of the cum-ack.
2405 struct sctp_association *asoc;
2408 int slide_from, slide_end, lgap, distance;
2409 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2413 old_cumack = asoc->cumulative_tsn;
2414 old_base = asoc->mapping_array_base_tsn;
2415 old_highest = asoc->highest_tsn_inside_map;
2417 * We could probably improve this a small bit by calculating the
2418 * offset of the current cum-ack as the starting point.
2421 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2422 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2426 /* there is a 0 bit */
2427 at += sctp_map_lookup_tab[val];
2431 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2433 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2434 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2436 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2437 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2439 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2440 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2441 sctp_print_mapping_array(asoc);
2442 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2443 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2445 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2446 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2449 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2450 highest_tsn = asoc->highest_tsn_inside_nr_map;
2452 highest_tsn = asoc->highest_tsn_inside_map;
2454 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2455 /* The complete array was completed by a single FR */
2456 /* highest becomes the cum-ack */
2462 /* clear the array */
2463 clr = ((at + 7) >> 3);
2464 if (clr > asoc->mapping_array_size) {
2465 clr = asoc->mapping_array_size;
2467 memset(asoc->mapping_array, 0, clr);
2468 memset(asoc->nr_mapping_array, 0, clr);
2470 for (i = 0; i < asoc->mapping_array_size; i++) {
2471 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2472 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2473 sctp_print_mapping_array(asoc);
2477 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2478 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2479 } else if (at >= 8) {
2480 /* we can slide the mapping array down */
2481 /* slide_from holds where we hit the first NON 0xff byte */
2484 * now calculate the ceiling of the move using our highest
2487 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2488 slide_end = (lgap >> 3);
2489 if (slide_end < slide_from) {
2490 sctp_print_mapping_array(asoc);
2492 panic("impossible slide");
2494 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2495 lgap, slide_end, slide_from, at);
2499 if (slide_end > asoc->mapping_array_size) {
2501 panic("would overrun buffer");
2503 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2504 asoc->mapping_array_size, slide_end);
2505 slide_end = asoc->mapping_array_size;
2508 distance = (slide_end - slide_from) + 1;
2509 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2510 sctp_log_map(old_base, old_cumack, old_highest,
2511 SCTP_MAP_PREPARE_SLIDE);
2512 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2513 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2515 if (distance + slide_from > asoc->mapping_array_size ||
2518 * Here we do NOT slide forward the array so that
2519 * hopefully when more data comes in to fill it up
2520 * we will be able to slide it forward. Really I
2521 * don't think this should happen :-0
2524 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2525 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2526 (uint32_t)asoc->mapping_array_size,
2527 SCTP_MAP_SLIDE_NONE);
2532 for (ii = 0; ii < distance; ii++) {
2533 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2534 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2537 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2538 asoc->mapping_array[ii] = 0;
2539 asoc->nr_mapping_array[ii] = 0;
2541 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2542 asoc->highest_tsn_inside_map += (slide_from << 3);
2544 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2545 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2547 asoc->mapping_array_base_tsn += (slide_from << 3);
2548 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2549 sctp_log_map(asoc->mapping_array_base_tsn,
2550 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2551 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide how to acknowledge newly received data.  After sliding the
 * mapping arrays: in SHUTDOWN-SENT state, stop any pending delayed-ack
 * timer and emit a SHUTDOWN plus an immediate SACK.  Otherwise, send a
 * SACK right away when one is owed -- send_sack is set, a gap just closed
 * (was_a_gap && !is_a_gap), duplicates were recorded, a gap still exists,
 * delayed acks are disabled, or the data-packet count reached sack_freq --
 * except that under CMT with DAC enabled a gap-only ack with no other
 * trigger is instead delayed by (re)starting the RECV timer.  If no SACK
 * is owed at all, just make sure the delayed-ack timer is running.
 * 'was_a_gap' tells us whether a gap existed before this packet arrived.
 */
2558 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2560 struct sctp_association *asoc;
2561 uint32_t highest_tsn;
2564 sctp_slide_mapping_arrays(stcb);
2566 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2567 highest_tsn = asoc->highest_tsn_inside_nr_map;
2569 highest_tsn = asoc->highest_tsn_inside_map;
2571 /* Is there a gap now? */
2572 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2575 * Now we need to see if we need to queue a sack or just start the
2576 * timer (if allowed).
2578 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2580 * Ok special case, in SHUTDOWN-SENT case. here we maker
2581 * sure SACK timer is off and instead send a SHUTDOWN and a
2584 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2585 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2586 stcb->sctp_ep, stcb, NULL,
2587 SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2589 sctp_send_shutdown(stcb,
2590 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2592 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2596 * CMT DAC algorithm: increase number of packets received
2599 stcb->asoc.cmt_dac_pkts_rcvd++;
2601 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2603 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2605 (stcb->asoc.numduptsns) || /* we have dup's */
2606 (is_a_gap) || /* is still a gap */
2607 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2608 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2611 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2612 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2613 (stcb->asoc.send_sack == 0) &&
2614 (stcb->asoc.numduptsns == 0) &&
2615 (stcb->asoc.delayed_ack) &&
2616 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2619 * CMT DAC algorithm: With CMT, delay acks
2620 * even in the face of
2622 * reordering. Therefore, if acks that do
2623 * not have to be sent because of the above
2624 * reasons, will be delayed. That is, acks
2625 * that would have been sent due to gap
2626 * reports will be delayed with DAC. Start
2627 * the delayed ack timer.
2629 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2630 stcb->sctp_ep, stcb, NULL);
2633 * Ok we must build a SACK since the timer
2634 * is pending, we got our first packet OR
2635 * there are gaps or duplicates.
2637 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2638 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2639 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2642 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2643 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2644 stcb->sctp_ep, stcb, NULL);
/*
 * Walk every chunk in a received packet starting at *offset, dispatching
 * DATA/I-DATA chunks to sctp_process_a_data_chunk() and tracking the
 * highest TSN delivered via *high_tsn.  The association is aborted on
 * protocol violations: a DATA chunk when I-DATA was negotiated (and vice
 * versa), runt data chunks, runt unknown chunks, and known control-chunk
 * types appearing after data in the same packet.  Unknown chunk types
 * follow the standard upper-two-bit rules: bit 0x40 queues an
 * "unrecognized chunk" error report, bit 0x80 clear stops processing the
 * rest of the packet.  Small singleton mbufs are copied out of full
 * clusters first to relieve cluster starvation.  Afterwards, rwnd-overrun
 * drops are reported via a PACKET-DROPPED, liveness bookkeeping
 * (overall_error_count, time_last_rcvd) is refreshed, and SACK generation
 * is kicked off through sctp_sack_check() using the pre-packet gap state
 * (was_a_gap).
 * NOTE(review): several return statements and the break_flag/stop_proc
 * handling fall on lines elided from this view; confirm the exact
 * return-value convention against the full file.
 */
2651 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2652 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2653 struct sctp_nets *net, uint32_t *high_tsn)
2655 struct sctp_chunkhdr *ch, chunk_buf;
2656 struct sctp_association *asoc;
2657 int num_chunks = 0; /* number of control chunks processed */
2659 int break_flag, last_chunk;
2660 int abort_flag = 0, was_a_gap;
2662 uint32_t highest_tsn;
2663 uint16_t chk_length;
2666 sctp_set_rwnd(stcb, &stcb->asoc);
2669 SCTP_TCB_LOCK_ASSERT(stcb);
2671 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2672 highest_tsn = asoc->highest_tsn_inside_nr_map;
2674 highest_tsn = asoc->highest_tsn_inside_map;
2676 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2678 * setup where we got the last DATA packet from for any SACK that
2679 * may need to go out. Don't bump the net. This is done ONLY when a
2680 * chunk is assigned.
2682 asoc->last_data_chunk_from = net;
2685 * Now before we proceed we must figure out if this is a wasted
2686 * cluster... i.e. it is a small packet sent in and yet the driver
2687 * underneath allocated a full cluster for it. If so we must copy it
2688 * to a smaller mbuf and free up the cluster mbuf. This will help
2689 * with cluster starvation.
2691 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2692 /* we only handle mbufs that are singletons.. not chains */
2693 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2695 /* ok lets see if we can copy the data up */
2698 /* get the pointers and copy */
2699 to = mtod(m, caddr_t *);
2700 from = mtod((*mm), caddr_t *);
2701 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2702 /* copy the length and free up the old */
2703 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2705 /* success, back copy */
2708 /* We are in trouble in the mbuf world .. yikes */
2712 /* get pointer to the first chunk header */
2713 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2714 sizeof(struct sctp_chunkhdr),
2715 (uint8_t *)&chunk_buf);
2720 * process all DATA chunks...
2722 *high_tsn = asoc->cumulative_tsn;
2724 asoc->data_pkts_seen++;
2725 while (stop_proc == 0) {
2726 /* validate chunk length */
2727 chk_length = ntohs(ch->chunk_length);
2728 if (length - *offset < chk_length) {
2729 /* all done, mutulated chunk */
2733 if ((asoc->idata_supported == 1) &&
2734 (ch->chunk_type == SCTP_DATA)) {
2735 struct mbuf *op_err;
2736 char msg[SCTP_DIAG_INFO_LEN];
2738 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2739 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2740 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2741 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2744 if ((asoc->idata_supported == 0) &&
2745 (ch->chunk_type == SCTP_IDATA)) {
2746 struct mbuf *op_err;
2747 char msg[SCTP_DIAG_INFO_LEN];
2749 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2750 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2751 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2752 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2755 if ((ch->chunk_type == SCTP_DATA) ||
2756 (ch->chunk_type == SCTP_IDATA)) {
2759 if (ch->chunk_type == SCTP_DATA) {
2760 clen = sizeof(struct sctp_data_chunk);
2762 clen = sizeof(struct sctp_idata_chunk);
2764 if (chk_length < clen) {
2766 * Need to send an abort since we had a
2767 * invalid data chunk.
2769 struct mbuf *op_err;
2770 char msg[SCTP_DIAG_INFO_LEN];
2772 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2773 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2775 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2776 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2777 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2780 #ifdef SCTP_AUDITING_ENABLED
2781 sctp_audit_log(0xB1, 0);
2783 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2788 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2789 chk_length, net, high_tsn, &abort_flag, &break_flag,
2790 last_chunk, ch->chunk_type)) {
2798 * Set because of out of rwnd space and no
2799 * drop rep space left.
2805 /* not a data chunk in the data region */
2806 switch (ch->chunk_type) {
2807 case SCTP_INITIATION:
2808 case SCTP_INITIATION_ACK:
2809 case SCTP_SELECTIVE_ACK:
2810 case SCTP_NR_SELECTIVE_ACK:
2811 case SCTP_HEARTBEAT_REQUEST:
2812 case SCTP_HEARTBEAT_ACK:
2813 case SCTP_ABORT_ASSOCIATION:
2815 case SCTP_SHUTDOWN_ACK:
2816 case SCTP_OPERATION_ERROR:
2817 case SCTP_COOKIE_ECHO:
2818 case SCTP_COOKIE_ACK:
2821 case SCTP_SHUTDOWN_COMPLETE:
2822 case SCTP_AUTHENTICATION:
2823 case SCTP_ASCONF_ACK:
2824 case SCTP_PACKET_DROPPED:
2825 case SCTP_STREAM_RESET:
2826 case SCTP_FORWARD_CUM_TSN:
2830 * Now, what do we do with KNOWN
2831 * chunks that are NOT in the right
2834 * For now, I do nothing but ignore
2835 * them. We may later want to add
2836 * sysctl stuff to switch out and do
2837 * either an ABORT() or possibly
2840 struct mbuf *op_err;
2841 char msg[SCTP_DIAG_INFO_LEN];
2843 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2845 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2846 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2851 * Unknown chunk type: use bit rules after
2854 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2856 * Need to send an abort since we
2857 * had a invalid chunk.
2859 struct mbuf *op_err;
2860 char msg[SCTP_DIAG_INFO_LEN];
2862 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2863 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2864 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2865 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2868 if (ch->chunk_type & 0x40) {
2869 /* Add a error report to the queue */
2870 struct mbuf *op_err;
2871 struct sctp_gen_error_cause *cause;
2873 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2874 0, M_NOWAIT, 1, MT_DATA);
2875 if (op_err != NULL) {
2876 cause = mtod(op_err, struct sctp_gen_error_cause *);
2877 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2878 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2879 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2880 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2881 if (SCTP_BUF_NEXT(op_err) != NULL) {
2882 sctp_queue_op_err(stcb, op_err);
2884 sctp_m_freem(op_err);
2888 if ((ch->chunk_type & 0x80) == 0) {
2889 /* discard the rest of this packet */
2891 } /* else skip this bad chunk and
2894 } /* switch of chunk type */
2896 *offset += SCTP_SIZE32(chk_length);
2897 if ((*offset >= length) || stop_proc) {
2898 /* no more data left in the mbuf chain */
2902 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2903 sizeof(struct sctp_chunkhdr),
2904 (uint8_t *)&chunk_buf);
2913 * we need to report rwnd overrun drops.
2915 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2919 * Did we get data, if so update the time for auto-close and
2920 * give peer credit for being alive.
2922 SCTP_STAT_INCR(sctps_recvpktwithdata);
2923 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2924 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2925 stcb->asoc.overall_error_count,
2927 SCTP_FROM_SCTP_INDATA,
2930 stcb->asoc.overall_error_count = 0;
2931 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2933 /* now service all of the reassm queue if needed */
2934 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2935 /* Assure that we ack right away */
2936 stcb->asoc.send_sack = 1;
2938 /* Start a sack timer or QUEUE a SACK for sending */
2939 sctp_sack_check(stcb, was_a_gap);
/*
 * Process one gap-ack fragment [last_tsn+frag_strt .. last_tsn+frag_end]
 * against the sent queue: mark covered chunks acked (NR-ACKED when
 * nr_sacking, freeing their data), remove newly acked chunks from flight,
 * feed RTT samples into sctp_calculate_rto(), and maintain the
 * CMT/CUCv2 pseudo-cumack trackers plus the SFR/DAC/HTNA bookkeeping
 * (saw_newack, this_sack_highest_newack, *this_sack_lowest_newack,
 * *biggest_newly_acked_tsn).  *p_tp1 carries the sent-queue cursor across
 * calls so successive in-order fragments resume where the previous one
 * stopped; if the queue end is reached once without a match the scan
 * wraps to the head ('circled') before giving up on that TSN.
 * Returns nonzero ('wake_him') when chunk data was freed -- the caller
 * uses this only for nr-sack processing.
 */
2944 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2945 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2947 uint32_t *biggest_newly_acked_tsn,
2948 uint32_t *this_sack_lowest_newack,
2951 struct sctp_tmit_chunk *tp1;
2952 unsigned int theTSN;
2953 int j, wake_him = 0, circled = 0;
2955 /* Recover the tp1 we last saw */
2958 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2960 for (j = frag_strt; j <= frag_end; j++) {
2961 theTSN = j + last_tsn;
2963 if (tp1->rec.data.doing_fast_retransmit)
2967 * CMT: CUCv2 algorithm. For each TSN being
2968 * processed from the sent queue, track the
2969 * next expected pseudo-cumack, or
2970 * rtx_pseudo_cumack, if required. Separate
2971 * cumack trackers for first transmissions,
2972 * and retransmissions.
2974 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2975 (tp1->whoTo->find_pseudo_cumack == 1) &&
2976 (tp1->snd_count == 1)) {
2977 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2978 tp1->whoTo->find_pseudo_cumack = 0;
2980 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2981 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2982 (tp1->snd_count > 1)) {
2983 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2984 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2986 if (tp1->rec.data.tsn == theTSN) {
2987 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2989 * must be held until
2992 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2994 * If it is less than RESEND, it is
2995 * now no-longer in flight.
2996 * Higher values may already be set
2997 * via previous Gap Ack Blocks...
2998 * i.e. ACKED or RESEND.
3000 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3001 *biggest_newly_acked_tsn)) {
3002 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3005 * CMT: SFR algo (and HTNA) - set
3006 * saw_newack to 1 for dest being
3007 * newly acked. update
3008 * this_sack_highest_newack if
3011 if (tp1->rec.data.chunk_was_revoked == 0)
3012 tp1->whoTo->saw_newack = 1;
3014 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3015 tp1->whoTo->this_sack_highest_newack)) {
3016 tp1->whoTo->this_sack_highest_newack =
3020 * CMT DAC algo: also update
3021 * this_sack_lowest_newack
3023 if (*this_sack_lowest_newack == 0) {
3024 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3025 sctp_log_sack(*this_sack_lowest_newack,
3030 SCTP_LOG_TSN_ACKED);
3032 *this_sack_lowest_newack = tp1->rec.data.tsn;
3035 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3036 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3037 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3038 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3039 * Separate pseudo_cumack trackers for first transmissions and
3042 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3043 if (tp1->rec.data.chunk_was_revoked == 0) {
3044 tp1->whoTo->new_pseudo_cumack = 1;
3046 tp1->whoTo->find_pseudo_cumack = 1;
3048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3049 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3051 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3052 if (tp1->rec.data.chunk_was_revoked == 0) {
3053 tp1->whoTo->new_pseudo_cumack = 1;
3055 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3058 sctp_log_sack(*biggest_newly_acked_tsn,
3063 SCTP_LOG_TSN_ACKED);
3065 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3066 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3067 tp1->whoTo->flight_size,
3069 (uint32_t)(uintptr_t)tp1->whoTo,
3072 sctp_flight_size_decrease(tp1);
3073 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3074 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3077 sctp_total_flight_decrease(stcb, tp1);
3079 tp1->whoTo->net_ack += tp1->send_size;
3080 if (tp1->snd_count < 2) {
3082 * True non-retransmitted chunk
3084 tp1->whoTo->net_ack2 += tp1->send_size;
3091 sctp_calculate_rto(stcb,
3094 &tp1->sent_rcv_time,
3095 SCTP_RTT_FROM_DATA)) {
3098 if (tp1->whoTo->rto_needed == 0) {
3099 tp1->whoTo->rto_needed = 1;
3106 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3107 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3108 stcb->asoc.this_sack_highest_gap)) {
3109 stcb->asoc.this_sack_highest_gap =
3112 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3113 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3114 #ifdef SCTP_AUDITING_ENABLED
3115 sctp_audit_log(0xB2,
3116 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3121 * All chunks NOT UNSENT fall through here and are marked
3122 * (leave PR-SCTP ones that are to skip alone though)
3124 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3125 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3126 tp1->sent = SCTP_DATAGRAM_MARKED;
3128 if (tp1->rec.data.chunk_was_revoked) {
3129 /* deflate the cwnd */
3130 tp1->whoTo->cwnd -= tp1->book_size;
3131 tp1->rec.data.chunk_was_revoked = 0;
3133 /* NR Sack code here */
3135 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3136 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3137 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3140 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3143 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3144 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3145 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3146 stcb->asoc.trigger_reset = 1;
3148 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3154 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3155 sctp_m_freem(tp1->data);
3162 } /* if (tp1->tsn == theTSN) */
3163 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3166 tp1 = TAILQ_NEXT(tp1, sctp_next);
3167 if ((tp1 == NULL) && (circled == 0)) {
3169 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3171 } /* end while (tp1) */
3174 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3176 /* In case the fragments were not in order we must reset */
3177 } /* end for (j = fragStart */
3179 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate the num_seg revocable followed by num_nr_seg non-revocable
 * gap-ack blocks carried in a SACK/NR-SACK chunk at *offset in mbuf m.
 * Each block is validated (start <= end; a block not strictly after the
 * previous one restarts the sent-queue cursor from the head) and handed
 * to sctp_process_segment_range(), which advances the shared tp1 cursor.
 * Tracks *biggest_tsn_acked across blocks and returns nonzero
 * ('chunk_freed') if any chunk data was freed during processing; an
 * mbuf read failure returns with whatever was freed so far.
 */
3184 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3185 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3186 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3187 int num_seg, int num_nr_seg, int *rto_ok)
3189 struct sctp_gap_ack_block *frag, block;
3190 struct sctp_tmit_chunk *tp1;
3195 uint16_t frag_strt, frag_end, prev_frag_end;
3197 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3201 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3204 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3206 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3207 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3208 *offset += sizeof(block);
3210 return (chunk_freed);
3212 frag_strt = ntohs(frag->start);
3213 frag_end = ntohs(frag->end);
3215 if (frag_strt > frag_end) {
3216 /* This gap report is malformed, skip it. */
3219 if (frag_strt <= prev_frag_end) {
3220 /* This gap report is not in order, so restart. */
3221 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3223 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3224 *biggest_tsn_acked = last_tsn + frag_end;
3231 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3232 non_revocable, &num_frs, biggest_newly_acked_tsn,
3233 this_sack_lowest_newack, rto_ok)) {
3236 prev_frag_end = frag_end;
3238 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3240 sctp_log_fr(*biggest_tsn_acked,
3241 *biggest_newly_acked_tsn,
3242 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3244 return (chunk_freed);
/*
 * Scan the sent queue for chunks above the new cum-ack and detect
 * revocation: a chunk still in state ACKED was gap-acked by an earlier
 * SACK but is not covered by this one, so it is moved back to SENT with
 * chunk_was_revoked set, its flight size is re-added, and the cwnd is
 * inflated by book_size to compensate for that artificial flight-size
 * inflation (deflated again when the chunk is re-acked).  Chunks in
 * state MARKED were acked by this very SACK and are promoted to ACKED.
 * Chunks beyond biggest_tsn_acked and UNSENT chunks end the relevant
 * processing (exact break/continue lines are elided in this view).
 */
3248 sctp_check_for_revoked(struct sctp_tcb *stcb,
3249 struct sctp_association *asoc, uint32_t cumack,
3250 uint32_t biggest_tsn_acked)
3252 struct sctp_tmit_chunk *tp1;
3254 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3255 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3257 * ok this guy is either ACK or MARKED. If it is
3258 * ACKED it has been previously acked but not this
3259 * time i.e. revoked. If it is MARKED it was ACK'ed
3262 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3265 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3266 /* it has been revoked */
3267 tp1->sent = SCTP_DATAGRAM_SENT;
3268 tp1->rec.data.chunk_was_revoked = 1;
3270 * We must add this stuff back in to assure
3271 * timers and such get started.
3273 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3274 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3275 tp1->whoTo->flight_size,
3277 (uint32_t)(uintptr_t)tp1->whoTo,
3280 sctp_flight_size_increase(tp1);
3281 sctp_total_flight_increase(stcb, tp1);
3283 * We inflate the cwnd to compensate for our
3284 * artificial inflation of the flight_size.
3286 tp1->whoTo->cwnd += tp1->book_size;
3287 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3288 sctp_log_sack(asoc->last_acked_seq,
3293 SCTP_LOG_TSN_REVOKED);
3295 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3296 /* it has been re-acked in this SACK */
3297 tp1->sent = SCTP_DATAGRAM_ACKED;
3300 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3307 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3308 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3310 struct sctp_tmit_chunk *tp1;
3311 int strike_flag = 0;
3313 int tot_retrans = 0;
3314 uint32_t sending_seq;
3315 struct sctp_nets *net;
3316 int num_dests_sacked = 0;
3319 * select the sending_seq, this is either the next thing ready to be
3320 * sent but not transmitted, OR, the next seq we assign.
3322 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3324 sending_seq = asoc->sending_seq;
3326 sending_seq = tp1->rec.data.tsn;
3329 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3330 if ((asoc->sctp_cmt_on_off > 0) &&
3331 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3332 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3333 if (net->saw_newack)
3337 if (stcb->asoc.prsctp_supported) {
3338 (void)SCTP_GETTIME_TIMEVAL(&now);
3340 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3342 if (tp1->no_fr_allowed) {
3343 /* this one had a timeout or something */
3346 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3347 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3348 sctp_log_fr(biggest_tsn_newly_acked,
3351 SCTP_FR_LOG_CHECK_STRIKE);
3353 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3354 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3358 if (stcb->asoc.prsctp_supported) {
3359 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3360 /* Is it expired? */
3361 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3362 /* Yes so drop it */
3363 if (tp1->data != NULL) {
3364 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3365 SCTP_SO_NOT_LOCKED);
3372 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3373 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3374 /* we are beyond the tsn in the sack */
3377 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3378 /* either a RESEND, ACKED, or MARKED */
3380 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3381 /* Continue strikin FWD-TSN chunks */
3382 tp1->rec.data.fwd_tsn_cnt++;
3387 * CMT : SFR algo (covers part of DAC and HTNA as well)
3389 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3391 * No new acks were receieved for data sent to this
3392 * dest. Therefore, according to the SFR algo for
3393 * CMT, no data sent to this dest can be marked for
3394 * FR using this SACK.
3397 } else if (tp1->whoTo &&
3398 SCTP_TSN_GT(tp1->rec.data.tsn,
3399 tp1->whoTo->this_sack_highest_newack) &&
3400 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3402 * CMT: New acks were receieved for data sent to
3403 * this dest. But no new acks were seen for data
3404 * sent after tp1. Therefore, according to the SFR
3405 * algo for CMT, tp1 cannot be marked for FR using
3406 * this SACK. This step covers part of the DAC algo
3407 * and the HTNA algo as well.
3412 * Here we check to see if we were have already done a FR
3413 * and if so we see if the biggest TSN we saw in the sack is
3414 * smaller than the recovery point. If so we don't strike
3415 * the tsn... otherwise we CAN strike the TSN.
3418 * @@@ JRI: Check for CMT if (accum_moved &&
3419 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3422 if (accum_moved && asoc->fast_retran_loss_recovery) {
3424 * Strike the TSN if in fast-recovery and cum-ack
3427 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3428 sctp_log_fr(biggest_tsn_newly_acked,
3431 SCTP_FR_LOG_STRIKE_CHUNK);
3433 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3436 if ((asoc->sctp_cmt_on_off > 0) &&
3437 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3439 * CMT DAC algorithm: If SACK flag is set to
3440 * 0, then lowest_newack test will not pass
3441 * because it would have been set to the
3442 * cumack earlier. If not already to be
3443 * rtx'd, If not a mixed sack and if tp1 is
3444 * not between two sacked TSNs, then mark by
3445 * one more. NOTE that we are marking by one
3446 * additional time since the SACK DAC flag
3447 * indicates that two packets have been
3448 * received after this missing TSN.
3450 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3451 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3452 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3453 sctp_log_fr(16 + num_dests_sacked,
3456 SCTP_FR_LOG_STRIKE_CHUNK);
3461 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3462 (asoc->sctp_cmt_on_off == 0)) {
3464 * For those that have done a FR we must take
3465 * special consideration if we strike. I.e the
3466 * biggest_newly_acked must be higher than the
3467 * sending_seq at the time we did the FR.
3470 #ifdef SCTP_FR_TO_ALTERNATE
3472 * If FR's go to new networks, then we must only do
3473 * this for singly homed asoc's. However if the FR's
3474 * go to the same network (Armando's work) then its
3475 * ok to FR multiple times.
3483 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3484 tp1->rec.data.fast_retran_tsn)) {
3486 * Strike the TSN, since this ack is
3487 * beyond where things were when we
3490 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3491 sctp_log_fr(biggest_tsn_newly_acked,
3494 SCTP_FR_LOG_STRIKE_CHUNK);
3496 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3500 if ((asoc->sctp_cmt_on_off > 0) &&
3501 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3503 * CMT DAC algorithm: If
3504 * SACK flag is set to 0,
3505 * then lowest_newack test
3506 * will not pass because it
3507 * would have been set to
3508 * the cumack earlier. If
3509 * not already to be rtx'd,
3510 * If not a mixed sack and
3511 * if tp1 is not between two
3512 * sacked TSNs, then mark by
3513 * one more. NOTE that we
3514 * are marking by one
3515 * additional time since the
3516 * SACK DAC flag indicates
3517 * that two packets have
3518 * been received after this
3521 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3522 (num_dests_sacked == 1) &&
3523 SCTP_TSN_GT(this_sack_lowest_newack,
3524 tp1->rec.data.tsn)) {
3525 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3526 sctp_log_fr(32 + num_dests_sacked,
3529 SCTP_FR_LOG_STRIKE_CHUNK);
3531 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3539 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3542 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3543 biggest_tsn_newly_acked)) {
3545 * We don't strike these: This is the HTNA
3546 * algorithm i.e. we don't strike If our TSN is
3547 * larger than the Highest TSN Newly Acked.
3551 /* Strike the TSN */
3552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3553 sctp_log_fr(biggest_tsn_newly_acked,
3556 SCTP_FR_LOG_STRIKE_CHUNK);
3558 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3561 if ((asoc->sctp_cmt_on_off > 0) &&
3562 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3564 * CMT DAC algorithm: If SACK flag is set to
3565 * 0, then lowest_newack test will not pass
3566 * because it would have been set to the
3567 * cumack earlier. If not already to be
3568 * rtx'd, If not a mixed sack and if tp1 is
3569 * not between two sacked TSNs, then mark by
3570 * one more. NOTE that we are marking by one
3571 * additional time since the SACK DAC flag
3572 * indicates that two packets have been
3573 * received after this missing TSN.
3575 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3576 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3577 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3578 sctp_log_fr(48 + num_dests_sacked,
3581 SCTP_FR_LOG_STRIKE_CHUNK);
3587 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3588 struct sctp_nets *alt;
3590 /* fix counts and things */
3591 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3592 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3593 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3595 (uint32_t)(uintptr_t)tp1->whoTo,
3599 tp1->whoTo->net_ack++;
3600 sctp_flight_size_decrease(tp1);
3601 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3602 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3608 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3609 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3611 /* add back to the rwnd */
3612 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3614 /* remove from the total flight */
3615 sctp_total_flight_decrease(stcb, tp1);
3617 if ((stcb->asoc.prsctp_supported) &&
3618 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3620 * Has it been retransmitted tv_sec times? -
3621 * we store the retran count there.
3623 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3624 /* Yes, so drop it */
3625 if (tp1->data != NULL) {
3626 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3627 SCTP_SO_NOT_LOCKED);
3629 /* Make sure to flag we had a FR */
3630 if (tp1->whoTo != NULL) {
3631 tp1->whoTo->net_ack++;
3637 * SCTP_PRINTF("OK, we are now ready to FR this
3640 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3641 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3645 /* This is a subsequent FR */
3646 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3648 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3649 if (asoc->sctp_cmt_on_off > 0) {
3651 * CMT: Using RTX_SSTHRESH policy for CMT.
3652 * If CMT is being used, then pick dest with
3653 * largest ssthresh for any retransmission.
3655 tp1->no_fr_allowed = 1;
3657 /* sa_ignore NO_NULL_CHK */
3658 if (asoc->sctp_cmt_pf > 0) {
3660 * JRS 5/18/07 - If CMT PF is on,
3661 * use the PF version of
3664 alt = sctp_find_alternate_net(stcb, alt, 2);
3667 * JRS 5/18/07 - If only CMT is on,
3668 * use the CMT version of
3671 /* sa_ignore NO_NULL_CHK */
3672 alt = sctp_find_alternate_net(stcb, alt, 1);
3678 * CUCv2: If a different dest is picked for
3679 * the retransmission, then new
3680 * (rtx-)pseudo_cumack needs to be tracked
3681 * for orig dest. Let CUCv2 track new (rtx-)
3682 * pseudo-cumack always.
3685 tp1->whoTo->find_pseudo_cumack = 1;
3686 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3689 } else { /* CMT is OFF */
3691 #ifdef SCTP_FR_TO_ALTERNATE
3692 /* Can we find an alternate? */
3693 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3696 * default behavior is to NOT retransmit
3697 * FR's to an alternate. Armando Caro's
3698 * paper details why.
3704 tp1->rec.data.doing_fast_retransmit = 1;
3706 /* mark the sending seq for possible subsequent FR's */
3708 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3709 * (uint32_t)tpi->rec.data.tsn);
3711 if (TAILQ_EMPTY(&asoc->send_queue)) {
3713 * If the queue of send is empty then its
3714 * the next sequence number that will be
3715 * assigned so we subtract one from this to
3716 * get the one we last sent.
3718 tp1->rec.data.fast_retran_tsn = sending_seq;
3721 * If there are chunks on the send queue
3722 * (unsent data that has made it from the
3723 * stream queues but not out the door, we
3724 * take the first one (which will have the
3725 * lowest TSN) and subtract one to get the
3728 struct sctp_tmit_chunk *ttt;
3730 ttt = TAILQ_FIRST(&asoc->send_queue);
3731 tp1->rec.data.fast_retran_tsn =
3737 * this guy had a RTO calculation pending on
3740 if ((tp1->whoTo != NULL) &&
3741 (tp1->whoTo->rto_needed == 0)) {
3742 tp1->whoTo->rto_needed = 1;
3746 if (alt != tp1->whoTo) {
3747 /* yes, there is an alternate. */
3748 sctp_free_remote_addr(tp1->whoTo);
3749 /* sa_ignore FREED_MEMORY */
3751 atomic_add_int(&alt->ref_count, 1);
3757 struct sctp_tmit_chunk *
3758 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3759 struct sctp_association *asoc)
3761 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3765 if (asoc->prsctp_supported == 0) {
3768 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3769 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3770 tp1->sent != SCTP_DATAGRAM_RESEND &&
3771 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3772 /* no chance to advance, out of here */
3775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3776 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3777 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3778 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3779 asoc->advanced_peer_ack_point,
3780 tp1->rec.data.tsn, 0, 0);
3783 if (!PR_SCTP_ENABLED(tp1->flags)) {
3785 * We can't fwd-tsn past any that are reliable aka
3786 * retransmitted until the asoc fails.
3791 (void)SCTP_GETTIME_TIMEVAL(&now);
3795 * now we got a chunk which is marked for another
3796 * retransmission to a PR-stream but has run out its chances
3797 * already maybe OR has been marked to skip now. Can we skip
3798 * it if its a resend?
3800 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3801 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3803 * Now is this one marked for resend and its time is
3806 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3807 /* Yes so drop it */
3809 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3810 1, SCTP_SO_NOT_LOCKED);
3814 * No, we are done when hit one for resend
3815 * whos time as not expired.
3821 * Ok now if this chunk is marked to drop it we can clean up
3822 * the chunk, advance our peer ack point and we can check
3825 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3826 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3827 /* advance PeerAckPoint goes forward */
3828 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3829 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3831 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3832 /* No update but we do save the chk */
3837 * If it is still in RESEND we can advance no
/*
 * Debug audit of flight-size bookkeeping: recount the chunks on the
 * sent queue by their 'sent' state and compare against the
 * association's running totals.  Called from SACK processing when the
 * counters look inconsistent.
 * NOTE(review): extraction elided lines here (including the return
 * paths); comments cover only what is visible.
 */
3847 sctp_fs_audit(struct sctp_association *asoc)
3849 struct sctp_tmit_chunk *chk;
3850 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3853 int entry_flight, entry_cnt;
/* Snapshot the totals on entry so they can be reported below. */
3858 entry_flight = asoc->total_flight;
3859 entry_cnt = asoc->total_flight_count;
3861 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Classify every chunk on the sent queue by its 'sent' marking. */
3864 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3865 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3866 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3871 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3873 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3875 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in-flight or "in between" fails the audit. */
3882 if ((inflight > 0) || (inbetween > 0)) {
3884 panic("Flight size-express incorrect? \n");
3886 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3887 entry_flight, entry_cnt);
3889 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3890 inflight, inbetween, resend, above, acked);
/*
 * Undo the in-flight accounting for a window-probe chunk and mark it
 * for retransmission once the peer's receive window has reopened.
 * NOTE(review): extraction elided some lines of this function; the
 * comments cover only the visible code.
 */
3899 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3900 struct sctp_association *asoc,
3901 struct sctp_tmit_chunk *tp1)
3903 tp1->window_probe = 0;
/* Already acked (or its data is gone): log it and leave accounting alone. */
3904 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3905 /* TSN's skipped we do NOT move back. */
3906 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3907 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3909 (uint32_t)(uintptr_t)tp1->whoTo,
3913 /* First setup this by shrinking flight */
3914 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3915 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3918 sctp_flight_size_decrease(tp1);
3919 sctp_total_flight_decrease(stcb, tp1);
3920 /* Now mark for resend */
3921 tp1->sent = SCTP_DATAGRAM_RESEND;
3922 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3925 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3926 tp1->whoTo->flight_size,
3928 (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * Fast-path handler for a SACK that carries only a new cumulative ack
 * (no gap-ack blocks): free newly cum-acked chunks, update peer rwnd
 * and congestion state, handle window-probe recovery, drive shutdown
 * transitions when the queues empty, and run PR-SCTP FWD-TSN advance.
 * On a protocol violation (cumack at/above our next send TSN) the
 * association is aborted and *abort_now is presumably set — TODO
 * confirm, the assignment is in an elided line.
 * NOTE(review): this excerpt is a lossy extraction (many lines
 * elided); comments below describe only the visible code.
 */
3934 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3935 uint32_t rwnd, int *abort_now, int ecne_seen)
3937 struct sctp_nets *net;
3938 struct sctp_association *asoc;
3939 struct sctp_tmit_chunk *tp1, *tp2;
3941 int win_probe_recovery = 0;
3942 int win_probe_recovered = 0;
3943 int j, done_once = 0;
3947 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3948 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3949 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3951 SCTP_TCB_LOCK_ASSERT(stcb);
3952 #ifdef SCTP_ASOCLOG_OF_TSNS
3953 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3954 stcb->asoc.cumack_log_at++;
3955 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3956 stcb->asoc.cumack_log_at = 0;
/* Remember the old peer rwnd so a reopened window can be detected below. */
3960 old_rwnd = asoc->peers_rwnd;
3961 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3964 } else if (asoc->last_acked_seq == cumack) {
3965 /* Window update sack */
3966 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3967 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3968 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3969 /* SWS sender side engages */
3970 asoc->peers_rwnd = 0;
3972 if (asoc->peers_rwnd > old_rwnd) {
3978 /* First setup for CC stuff */
3979 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3980 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3981 /* Drag along the window_tsn for cwr's */
3982 net->cwr_window_tsn = cumack;
3984 net->prev_cwnd = net->cwnd;
3989 * CMT: Reset CUC and Fast recovery algo variables before
3992 net->new_pseudo_cumack = 0;
3993 net->will_exit_fast_recovery = 0;
3994 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3995 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/*
 * Sanity check: send_s is one past the highest TSN we have sent; a
 * cumack at or above it acknowledges data never sent -> abort below.
 */
3998 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3999 tp1 = TAILQ_LAST(&asoc->sent_queue,
4000 sctpchunk_listhead);
4001 send_s = tp1->rec.data.tsn + 1;
4003 send_s = asoc->sending_seq;
4005 if (SCTP_TSN_GE(cumack, send_s)) {
4006 struct mbuf *op_err;
4007 char msg[SCTP_DIAG_INFO_LEN];
4011 SCTP_SNPRINTF(msg, sizeof(msg),
4012 "Cum ack %8.8x greater or equal than TSN %8.8x",
4014 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4015 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4016 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4019 asoc->this_sack_highest_gap = cumack;
4020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4021 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4022 stcb->asoc.overall_error_count,
4024 SCTP_FROM_SCTP_INDATA,
/* A valid SACK clears the association's error counter. */
4027 stcb->asoc.overall_error_count = 0;
4028 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4029 /* process the new consecutive TSN first */
4030 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4031 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4032 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4033 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4035 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4037 * If it is less than ACKED, it is
4038 * now no-longer in flight. Higher
4039 * values may occur during marking
4041 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4043 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4044 tp1->whoTo->flight_size,
4046 (uint32_t)(uintptr_t)tp1->whoTo,
4049 sctp_flight_size_decrease(tp1);
4050 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4051 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4054 /* sa_ignore NO_NULL_CHK */
4055 sctp_total_flight_decrease(stcb, tp1);
4057 tp1->whoTo->net_ack += tp1->send_size;
4058 if (tp1->snd_count < 2) {
4060 * True non-retransmitted
4063 tp1->whoTo->net_ack2 +=
4066 /* update RTO too? */
4069 sctp_calculate_rto(stcb,
4072 &tp1->sent_rcv_time,
4073 SCTP_RTT_FROM_DATA)) {
4076 if (tp1->whoTo->rto_needed == 0) {
4077 tp1->whoTo->rto_needed = 1;
4083 * CMT: CUCv2 algorithm. From the
4084 * cumack'd TSNs, for each TSN being
4085 * acked for the first time, set the
4086 * following variables for the
4087 * corresp destination.
4088 * new_pseudo_cumack will trigger a
4090 * find_(rtx_)pseudo_cumack will
4091 * trigger search for the next
4092 * expected (rtx-)pseudo-cumack.
4094 tp1->whoTo->new_pseudo_cumack = 1;
4095 tp1->whoTo->find_pseudo_cumack = 1;
4096 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4099 /* sa_ignore NO_NULL_CHK */
4100 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4103 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4104 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4106 if (tp1->rec.data.chunk_was_revoked) {
4107 /* deflate the cwnd */
4108 tp1->whoTo->cwnd -= tp1->book_size;
4109 tp1->rec.data.chunk_was_revoked = 0;
/* Maintain the per-stream chunks_on_queues count as chunks are freed. */
4111 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4112 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4113 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4116 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4120 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4121 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4122 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4123 asoc->trigger_reset = 1;
4125 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4127 /* sa_ignore NO_NULL_CHK */
4128 sctp_free_bufspace(stcb, asoc, tp1, 1);
4129 sctp_m_freem(tp1->data);
4132 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4133 sctp_log_sack(asoc->last_acked_seq,
4138 SCTP_LOG_FREE_SENT);
4140 asoc->sent_queue_cnt--;
4141 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Freed send-buffer space: wake any writer blocked on the socket. */
4148 /* sa_ignore NO_NULL_CHK */
4149 if (stcb->sctp_socket) {
4150 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4151 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4152 /* sa_ignore NO_NULL_CHK */
4153 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4155 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4157 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4158 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4162 /* JRS - Use the congestion control given in the CC module */
4163 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4164 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4165 if (net->net_ack2 > 0) {
4167 * Karn's rule applies to clearing error
4168 * count, this is optional.
4170 net->error_count = 0;
4171 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4172 /* addr came good */
4173 net->dest_state |= SCTP_ADDR_REACHABLE;
4174 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4175 0, (void *)net, SCTP_SO_NOT_LOCKED);
4177 if (net == stcb->asoc.primary_destination) {
4178 if (stcb->asoc.alternate) {
4180 * release the alternate,
4183 sctp_free_remote_addr(stcb->asoc.alternate);
4184 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart heartbeats, notify CC. */
4187 if (net->dest_state & SCTP_ADDR_PF) {
4188 net->dest_state &= ~SCTP_ADDR_PF;
4189 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4190 stcb->sctp_ep, stcb, net,
4191 SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4192 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4193 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4194 /* Done with this net */
4197 /* restore any doubled timers */
4198 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4199 if (net->RTO < stcb->asoc.minrto) {
4200 net->RTO = stcb->asoc.minrto;
4202 if (net->RTO > stcb->asoc.maxrto) {
4203 net->RTO = stcb->asoc.maxrto;
4207 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4209 asoc->last_acked_seq = cumack;
4211 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4212 /* nothing left in-flight */
4213 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4214 net->flight_size = 0;
4215 net->partial_bytes_acked = 0;
4217 asoc->total_flight = 0;
4218 asoc->total_flight_count = 0;
/* Recompute peer rwnd against the remaining flight, clamp for SWS. */
4222 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4223 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4224 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4225 /* SWS sender side engages */
4226 asoc->peers_rwnd = 0;
4228 if (asoc->peers_rwnd > old_rwnd) {
4229 win_probe_recovery = 1;
4231 /* Now assure a timer where data is queued at */
4234 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4235 if (win_probe_recovery && (net->window_probe)) {
4236 win_probe_recovered = 1;
4238 * Find first chunk that was used with window probe
4239 * and clear the sent
4241 /* sa_ignore FREED_MEMORY */
4242 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4243 if (tp1->window_probe) {
4244 /* move back to data send queue */
4245 sctp_window_probe_recovery(stcb, asoc, tp1);
4250 if (net->flight_size) {
4252 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4253 if (net->window_probe) {
4254 net->window_probe = 0;
4257 if (net->window_probe) {
4259 * In window probes we must assure a timer
4260 * is still running there
4262 net->window_probe = 0;
4263 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4264 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4266 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4267 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4269 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
/* Flight accounting looks wrong: audit and rebuild the counters. */
4274 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4275 (asoc->sent_queue_retran_cnt == 0) &&
4276 (win_probe_recovered == 0) &&
4279 * huh, this should not happen unless all packets are
4280 * PR-SCTP and marked to skip of course.
4282 if (sctp_fs_audit(asoc)) {
4283 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4284 net->flight_size = 0;
4286 asoc->total_flight = 0;
4287 asoc->total_flight_count = 0;
4288 asoc->sent_queue_retran_cnt = 0;
4289 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4290 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4291 sctp_flight_size_increase(tp1);
4292 sctp_total_flight_increase(stcb, tp1);
4293 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4294 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4301 /**********************************/
4302 /* Now what about shutdown issues */
4303 /**********************************/
4304 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4305 /* nothing left on sendqueue.. consider done */
4307 if ((asoc->stream_queue_cnt == 1) &&
4308 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4309 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4310 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4311 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4313 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4314 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4315 (asoc->stream_queue_cnt == 1) &&
4316 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4317 struct mbuf *op_err;
4321 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4322 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4323 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* All data sent and acked: move to SHUTDOWN-SENT and send SHUTDOWN. */
4326 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4327 (asoc->stream_queue_cnt == 0)) {
4328 struct sctp_nets *netp;
4330 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4331 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4332 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4334 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4335 sctp_stop_timers_for_shutdown(stcb);
4336 if (asoc->alternate) {
4337 netp = asoc->alternate;
4339 netp = asoc->primary_destination;
4341 sctp_send_shutdown(stcb, netp);
4342 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4343 stcb->sctp_ep, stcb, netp);
4344 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4345 stcb->sctp_ep, stcb, NULL);
4346 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4347 (asoc->stream_queue_cnt == 0)) {
4348 struct sctp_nets *netp;
4350 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4351 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4352 sctp_stop_timers_for_shutdown(stcb);
4353 if (asoc->alternate) {
4354 netp = asoc->alternate;
4356 netp = asoc->primary_destination;
4358 sctp_send_shutdown_ack(stcb, netp);
4359 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4360 stcb->sctp_ep, stcb, netp);
4363 /*********************************************/
4364 /* Here we perform PR-SCTP procedures */
4366 /*********************************************/
4367 /* C1. update advancedPeerAckPoint */
4368 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4369 asoc->advanced_peer_ack_point = cumack;
4371 /* PR-Sctp issues need to be addressed too */
4372 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4373 struct sctp_tmit_chunk *lchk;
4374 uint32_t old_adv_peer_ack_point;
4376 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4377 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4378 /* C3. See if we need to send a Fwd-TSN */
4379 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4381 * ISSUE with ECN, see FWD-TSN processing.
4383 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4384 send_forward_tsn(stcb, asoc);
4386 /* try to FR fwd-tsn's that get lost too */
4387 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4388 send_forward_tsn(stcb, asoc);
4392 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4393 if (lchk->whoTo != NULL) {
4398 /* Assure a timer is up */
4399 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4400 stcb->sctp_ep, stcb, lchk->whoTo);
4403 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4404 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4406 stcb->asoc.peers_rwnd,
4407 stcb->asoc.total_flight,
4408 stcb->asoc.total_output_queue_size);
4413 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4414 struct sctp_tcb *stcb,
4415 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4416 int *abort_now, uint8_t flags,
4417 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4419 struct sctp_association *asoc;
4420 struct sctp_tmit_chunk *tp1, *tp2;
4421 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4422 uint16_t wake_him = 0;
4423 uint32_t send_s = 0;
4425 int accum_moved = 0;
4426 int will_exit_fast_recovery = 0;
4427 uint32_t a_rwnd, old_rwnd;
4428 int win_probe_recovery = 0;
4429 int win_probe_recovered = 0;
4430 struct sctp_nets *net = NULL;
4433 uint8_t reneged_all = 0;
4434 uint8_t cmt_dac_flag;
4437 * we take any chance we can to service our queues since we cannot
4438 * get awoken when the socket is read from :<
4441 * Now perform the actual SACK handling: 1) Verify that it is not an
4442 * old sack, if so discard. 2) If there is nothing left in the send
4443 * queue (cum-ack is equal to last acked) then you have a duplicate
4444 * too, update any rwnd change and verify no timers are running.
4445 * then return. 3) Process any new consequtive data i.e. cum-ack
4446 * moved process these first and note that it moved. 4) Process any
4447 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4448 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4449 * sync up flightsizes and things, stop all timers and also check
4450 * for shutdown_pending state. If so then go ahead and send off the
4451 * shutdown. If in shutdown recv, send off the shutdown-ack and
4452 * start that timer, Ret. 9) Strike any non-acked things and do FR
4453 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4454 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4455 * if in shutdown_recv state.
4457 SCTP_TCB_LOCK_ASSERT(stcb);
4459 this_sack_lowest_newack = 0;
4460 SCTP_STAT_INCR(sctps_slowpath_sack);
4462 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4463 #ifdef SCTP_ASOCLOG_OF_TSNS
4464 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4465 stcb->asoc.cumack_log_at++;
4466 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4467 stcb->asoc.cumack_log_at = 0;
4472 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4473 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4474 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4477 old_rwnd = stcb->asoc.peers_rwnd;
4478 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4479 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4480 stcb->asoc.overall_error_count,
4482 SCTP_FROM_SCTP_INDATA,
4485 stcb->asoc.overall_error_count = 0;
4487 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4488 sctp_log_sack(asoc->last_acked_seq,
4495 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4497 uint32_t *dupdata, dblock;
4499 for (i = 0; i < num_dup; i++) {
4500 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4501 sizeof(uint32_t), (uint8_t *)&dblock);
4502 if (dupdata == NULL) {
4505 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4509 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4510 tp1 = TAILQ_LAST(&asoc->sent_queue,
4511 sctpchunk_listhead);
4512 send_s = tp1->rec.data.tsn + 1;
4515 send_s = asoc->sending_seq;
4517 if (SCTP_TSN_GE(cum_ack, send_s)) {
4518 struct mbuf *op_err;
4519 char msg[SCTP_DIAG_INFO_LEN];
4522 * no way, we have not even sent this TSN out yet. Peer is
4523 * hopelessly messed up with us.
4525 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4528 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4529 tp1->rec.data.tsn, (void *)tp1);
4534 SCTP_SNPRINTF(msg, sizeof(msg),
4535 "Cum ack %8.8x greater or equal than TSN %8.8x",
4537 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4538 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4539 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4542 /**********************/
4543 /* 1) check the range */
4544 /**********************/
4545 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4546 /* acking something behind */
4550 /* update the Rwnd of the peer */
4551 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4552 TAILQ_EMPTY(&asoc->send_queue) &&
4553 (asoc->stream_queue_cnt == 0)) {
4554 /* nothing left on send/sent and strmq */
4555 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4556 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4557 asoc->peers_rwnd, 0, 0, a_rwnd);
4559 asoc->peers_rwnd = a_rwnd;
4560 if (asoc->sent_queue_retran_cnt) {
4561 asoc->sent_queue_retran_cnt = 0;
4563 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4564 /* SWS sender side engages */
4565 asoc->peers_rwnd = 0;
4567 /* stop any timers */
4568 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4569 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4570 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4571 net->partial_bytes_acked = 0;
4572 net->flight_size = 0;
4574 asoc->total_flight = 0;
4575 asoc->total_flight_count = 0;
4579 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4580 * things. The total byte count acked is tracked in netAckSz AND
4581 * netAck2 is used to track the total bytes acked that are un-
4582 * amibguious and were never retransmitted. We track these on a per
4583 * destination address basis.
4585 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4586 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4587 /* Drag along the window_tsn for cwr's */
4588 net->cwr_window_tsn = cum_ack;
4590 net->prev_cwnd = net->cwnd;
4595 * CMT: Reset CUC and Fast recovery algo variables before
4598 net->new_pseudo_cumack = 0;
4599 net->will_exit_fast_recovery = 0;
4600 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4601 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4605 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4606 * to be greater than the cumack. Also reset saw_newack to 0
4609 net->saw_newack = 0;
4610 net->this_sack_highest_newack = last_tsn;
4612 /* process the new consecutive TSN first */
4613 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4614 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4615 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4617 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4619 * If it is less than ACKED, it is
4620 * now no-longer in flight. Higher
4621 * values may occur during marking
4623 if ((tp1->whoTo->dest_state &
4624 SCTP_ADDR_UNCONFIRMED) &&
4625 (tp1->snd_count < 2)) {
4627 * If there was no retran
4628 * and the address is
4629 * un-confirmed and we sent
4631 * sacked.. its confirmed,
4634 tp1->whoTo->dest_state &=
4635 ~SCTP_ADDR_UNCONFIRMED;
4637 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4638 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4639 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4640 tp1->whoTo->flight_size,
4642 (uint32_t)(uintptr_t)tp1->whoTo,
4645 sctp_flight_size_decrease(tp1);
4646 sctp_total_flight_decrease(stcb, tp1);
4647 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4648 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4652 tp1->whoTo->net_ack += tp1->send_size;
4654 /* CMT SFR and DAC algos */
4655 this_sack_lowest_newack = tp1->rec.data.tsn;
4656 tp1->whoTo->saw_newack = 1;
4658 if (tp1->snd_count < 2) {
4660 * True non-retransmitted
4663 tp1->whoTo->net_ack2 +=
4666 /* update RTO too? */
4669 sctp_calculate_rto(stcb,
4672 &tp1->sent_rcv_time,
4673 SCTP_RTT_FROM_DATA)) {
4676 if (tp1->whoTo->rto_needed == 0) {
4677 tp1->whoTo->rto_needed = 1;
4683 * CMT: CUCv2 algorithm. From the
4684 * cumack'd TSNs, for each TSN being
4685 * acked for the first time, set the
4686 * following variables for the
4687 * corresp destination.
4688 * new_pseudo_cumack will trigger a
4690 * find_(rtx_)pseudo_cumack will
4691 * trigger search for the next
4692 * expected (rtx-)pseudo-cumack.
4694 tp1->whoTo->new_pseudo_cumack = 1;
4695 tp1->whoTo->find_pseudo_cumack = 1;
4696 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4699 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4700 sctp_log_sack(asoc->last_acked_seq,
4705 SCTP_LOG_TSN_ACKED);
4707 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4708 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4711 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4712 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4713 #ifdef SCTP_AUDITING_ENABLED
4714 sctp_audit_log(0xB3,
4715 (asoc->sent_queue_retran_cnt & 0x000000ff));
4718 if (tp1->rec.data.chunk_was_revoked) {
4719 /* deflate the cwnd */
4720 tp1->whoTo->cwnd -= tp1->book_size;
4721 tp1->rec.data.chunk_was_revoked = 0;
4723 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4724 tp1->sent = SCTP_DATAGRAM_ACKED;
4731 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4732 /* always set this up to cum-ack */
4733 asoc->this_sack_highest_gap = last_tsn;
4735 if ((num_seg > 0) || (num_nr_seg > 0)) {
4738 * thisSackHighestGap will increase while handling NEW
4739 * segments this_sack_highest_newack will increase while
4740 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4741 * used for CMT DAC algo. saw_newack will also change.
4743 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4744 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4745 num_seg, num_nr_seg, &rto_ok)) {
4749 * validate the biggest_tsn_acked in the gap acks if strict
4750 * adherence is wanted.
4752 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4754 * peer is either confused or we are under attack.
4757 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4758 biggest_tsn_acked, send_s);
4762 /*******************************************/
4763 /* cancel ALL T3-send timer if accum moved */
4764 /*******************************************/
4765 if (asoc->sctp_cmt_on_off > 0) {
4766 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4767 if (net->new_pseudo_cumack)
4768 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4770 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4775 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4776 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4777 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4781 /********************************************/
4782 /* drop the acked chunks from the sentqueue */
4783 /********************************************/
4784 asoc->last_acked_seq = cum_ack;
4786 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4787 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4790 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4791 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4792 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4795 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4799 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4800 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4801 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4802 asoc->trigger_reset = 1;
4804 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4805 if (PR_SCTP_ENABLED(tp1->flags)) {
4806 if (asoc->pr_sctp_cnt != 0)
4807 asoc->pr_sctp_cnt--;
4809 asoc->sent_queue_cnt--;
4811 /* sa_ignore NO_NULL_CHK */
4812 sctp_free_bufspace(stcb, asoc, tp1, 1);
4813 sctp_m_freem(tp1->data);
4815 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4816 asoc->sent_queue_cnt_removeable--;
4819 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4820 sctp_log_sack(asoc->last_acked_seq,
4825 SCTP_LOG_FREE_SENT);
4827 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4830 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4832 panic("Warning flight size is positive and should be 0");
4834 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4835 asoc->total_flight);
4837 asoc->total_flight = 0;
4840 /* sa_ignore NO_NULL_CHK */
4841 if ((wake_him) && (stcb->sctp_socket)) {
4842 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4843 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4844 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4846 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4848 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4849 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4853 if (asoc->fast_retran_loss_recovery && accum_moved) {
4854 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4855 /* Setup so we will exit RFC2582 fast recovery */
4856 will_exit_fast_recovery = 1;
4860 * Check for revoked fragments:
4862 * if Previous sack - Had no frags then we can't have any revoked if
4863 * Previous sack - Had frag's then - If we now have frags aka
4864 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4865 * some of them. else - The peer revoked all ACKED fragments, since
4866 * we had some before and now we have NONE.
4870 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4871 asoc->saw_sack_with_frags = 1;
4872 } else if (asoc->saw_sack_with_frags) {
4873 int cnt_revoked = 0;
4875 /* Peer revoked all dg's marked or acked */
4876 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4877 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4878 tp1->sent = SCTP_DATAGRAM_SENT;
4879 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4880 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4881 tp1->whoTo->flight_size,
4883 (uint32_t)(uintptr_t)tp1->whoTo,
4886 sctp_flight_size_increase(tp1);
4887 sctp_total_flight_increase(stcb, tp1);
4888 tp1->rec.data.chunk_was_revoked = 1;
4890 * To ensure that this increase in
4891 * flightsize, which is artificial, does not
4892 * throttle the sender, we also increase the
4893 * cwnd artificially.
4895 tp1->whoTo->cwnd += tp1->book_size;
4902 asoc->saw_sack_with_frags = 0;
4905 asoc->saw_sack_with_nr_frags = 1;
4907 asoc->saw_sack_with_nr_frags = 0;
4909 /* JRS - Use the congestion control given in the CC module */
4910 if (ecne_seen == 0) {
4911 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4912 if (net->net_ack2 > 0) {
4914 * Karn's rule applies to clearing error
4915 * count, this is optional.
4917 net->error_count = 0;
4918 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4919 /* addr came good */
4920 net->dest_state |= SCTP_ADDR_REACHABLE;
4921 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4922 0, (void *)net, SCTP_SO_NOT_LOCKED);
4925 if (net == stcb->asoc.primary_destination) {
4926 if (stcb->asoc.alternate) {
4928 * release the alternate,
4931 sctp_free_remote_addr(stcb->asoc.alternate);
4932 stcb->asoc.alternate = NULL;
4936 if (net->dest_state & SCTP_ADDR_PF) {
4937 net->dest_state &= ~SCTP_ADDR_PF;
4938 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4939 stcb->sctp_ep, stcb, net,
4940 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4941 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4942 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4943 /* Done with this net */
4946 /* restore any doubled timers */
4947 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4948 if (net->RTO < stcb->asoc.minrto) {
4949 net->RTO = stcb->asoc.minrto;
4951 if (net->RTO > stcb->asoc.maxrto) {
4952 net->RTO = stcb->asoc.maxrto;
4956 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4959 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4960 /* nothing left in-flight */
4961 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4962 /* stop all timers */
4963 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4965 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4966 net->flight_size = 0;
4967 net->partial_bytes_acked = 0;
4969 asoc->total_flight = 0;
4970 asoc->total_flight_count = 0;
4973 /**********************************/
4974 /* Now what about shutdown issues */
4975 /**********************************/
4976 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4977 /* nothing left on sendqueue.. consider done */
4978 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4979 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4980 asoc->peers_rwnd, 0, 0, a_rwnd);
4982 asoc->peers_rwnd = a_rwnd;
4983 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4984 /* SWS sender side engages */
4985 asoc->peers_rwnd = 0;
4988 if ((asoc->stream_queue_cnt == 1) &&
4989 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4990 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4991 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4992 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4994 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4995 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4996 (asoc->stream_queue_cnt == 1) &&
4997 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4998 struct mbuf *op_err;
5002 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5003 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5004 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5007 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5008 (asoc->stream_queue_cnt == 0)) {
5009 struct sctp_nets *netp;
5011 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5012 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5013 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5015 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5016 sctp_stop_timers_for_shutdown(stcb);
5017 if (asoc->alternate) {
5018 netp = asoc->alternate;
5020 netp = asoc->primary_destination;
5022 sctp_send_shutdown(stcb, netp);
5023 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5024 stcb->sctp_ep, stcb, netp);
5025 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5026 stcb->sctp_ep, stcb, NULL);
5028 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5029 (asoc->stream_queue_cnt == 0)) {
5030 struct sctp_nets *netp;
5032 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5033 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5034 sctp_stop_timers_for_shutdown(stcb);
5035 if (asoc->alternate) {
5036 netp = asoc->alternate;
5038 netp = asoc->primary_destination;
5040 sctp_send_shutdown_ack(stcb, netp);
5041 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5042 stcb->sctp_ep, stcb, netp);
5047 * Now here we are going to recycle net_ack for a different use...
5050 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5055 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5056 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5057 * automatically ensure that.
5059 if ((asoc->sctp_cmt_on_off > 0) &&
5060 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5061 (cmt_dac_flag == 0)) {
5062 this_sack_lowest_newack = cum_ack;
5064 if ((num_seg > 0) || (num_nr_seg > 0)) {
5065 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5066 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5068 /* JRS - Use the congestion control given in the CC module */
5069 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5071 /* Now are we exiting loss recovery ? */
5072 if (will_exit_fast_recovery) {
5073 /* Ok, we must exit fast recovery */
5074 asoc->fast_retran_loss_recovery = 0;
5076 if ((asoc->sat_t3_loss_recovery) &&
5077 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5078 /* end satellite t3 loss recovery */
5079 asoc->sat_t3_loss_recovery = 0;
5084 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5085 if (net->will_exit_fast_recovery) {
5086 /* Ok, we must exit fast recovery */
5087 net->fast_retran_loss_recovery = 0;
5091 /* Adjust and set the new rwnd value */
5092 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5093 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5094 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5096 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5097 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5098 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5099 /* SWS sender side engages */
5100 asoc->peers_rwnd = 0;
5102 if (asoc->peers_rwnd > old_rwnd) {
5103 win_probe_recovery = 1;
5107 * Now we must setup so we have a timer up for anyone with
5113 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5114 if (win_probe_recovery && (net->window_probe)) {
5115 win_probe_recovered = 1;
5117 * Find first chunk that was used with
5118 * window probe and clear the event. Put
5119 * it back into the send queue as if has
5122 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5123 if (tp1->window_probe) {
5124 sctp_window_probe_recovery(stcb, asoc, tp1);
5129 if (net->flight_size) {
5131 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5132 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5133 stcb->sctp_ep, stcb, net);
5135 if (net->window_probe) {
5136 net->window_probe = 0;
5139 if (net->window_probe) {
5141 * In window probes we must assure a timer
5142 * is still running there
5144 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5145 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5146 stcb->sctp_ep, stcb, net);
5149 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5150 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5152 SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5157 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5158 (asoc->sent_queue_retran_cnt == 0) &&
5159 (win_probe_recovered == 0) &&
5162 * huh, this should not happen unless all packets are
5163 * PR-SCTP and marked to skip of course.
5165 if (sctp_fs_audit(asoc)) {
5166 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5167 net->flight_size = 0;
5169 asoc->total_flight = 0;
5170 asoc->total_flight_count = 0;
5171 asoc->sent_queue_retran_cnt = 0;
5172 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5173 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5174 sctp_flight_size_increase(tp1);
5175 sctp_total_flight_increase(stcb, tp1);
5176 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5177 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5184 /*********************************************/
5185 /* Here we perform PR-SCTP procedures */
5187 /*********************************************/
5188 /* C1. update advancedPeerAckPoint */
5189 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5190 asoc->advanced_peer_ack_point = cum_ack;
5192 /* C2. try to further move advancedPeerAckPoint ahead */
5193 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5194 struct sctp_tmit_chunk *lchk;
5195 uint32_t old_adv_peer_ack_point;
5197 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5198 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5199 /* C3. See if we need to send a Fwd-TSN */
5200 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5202 * ISSUE with ECN, see FWD-TSN processing.
5204 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5205 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5206 0xee, cum_ack, asoc->advanced_peer_ack_point,
5207 old_adv_peer_ack_point);
5209 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5210 send_forward_tsn(stcb, asoc);
5212 /* try to FR fwd-tsn's that get lost too */
5213 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5214 send_forward_tsn(stcb, asoc);
5218 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5219 if (lchk->whoTo != NULL) {
5224 /* Assure a timer is up */
5225 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5226 stcb->sctp_ep, stcb, lchk->whoTo);
5229 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5230 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5232 stcb->asoc.peers_rwnd,
5233 stcb->asoc.total_flight,
5234 stcb->asoc.total_output_queue_size);
5239 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5242 uint32_t cum_ack, a_rwnd;
5244 cum_ack = ntohl(cp->cumulative_tsn_ack);
5245 /* Arrange so a_rwnd does NOT change */
5246 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5248 /* Now call the express sack handling */
5249 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * Kick the PR-SCTP re-ordering queue for one inbound stream: after a
 * FORWARD-TSN moved last_mid_delivered forward, push every message
 * that is now deliverable (first any MID <= last_mid_delivered, then
 * strictly consecutive MIDs) from the stream queue to the socket
 * read queue.
 * NOTE(review): uses SCTP_READ_LOCK_HELD throughout, so the INP read
 * lock is presumably already held by the caller -- confirm at call
 * sites (sctp_handle_forward_tsn takes it before calling here).
 */
5253 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5254 struct sctp_stream_in *strmin)
5256 struct sctp_queued_to_read *control, *ncontrol;
5257 struct sctp_association *asoc;
5259 int need_reasm_check = 0;
/* Remember the delivery point so it can be restored after the reasm check. */
5262 mid = strmin->last_mid_delivered;
5264 * First deliver anything prior to and including the stream no that
/* Pass 1: everything at or before the current delivery point. */
5267 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5268 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5269 /* this is deliverable now */
5270 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Complete (unfragmented) message: unlink from whichever stream queue holds it. */
5271 if (control->on_strm_q) {
5272 if (control->on_strm_q == SCTP_ON_ORDERED) {
5273 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5274 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5275 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5278 panic("strmin: %p ctl: %p unknown %d",
5279 strmin, control, control->on_strm_q);
5282 control->on_strm_q = 0;
5284 /* subtract pending on streams */
/* Guard against underflow of the aggregate on-stream byte count. */
5285 if (asoc->size_on_all_streams >= control->length) {
5286 asoc->size_on_all_streams -= control->length;
5289 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5291 asoc->size_on_all_streams = 0;
5294 sctp_ucount_decr(asoc->cnt_on_all_streams);
5295 /* deliver it to at least the delivery-q */
5296 if (stcb->sctp_socket) {
5297 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5298 sctp_add_to_readq(stcb->sctp_ep, stcb,
5300 &stcb->sctp_socket->so_rcv,
5301 1, SCTP_READ_LOCK_HELD,
5302 SCTP_SO_NOT_LOCKED);
5305 /* Its a fragmented message */
5306 if (control->first_frag_seen) {
5308 * Make it so this is next to
5309 * deliver, we restore later
5311 strmin->last_mid_delivered = control->mid - 1;
5312 need_reasm_check = 1;
5317 /* no more delivery now. */
/* Run the reassembly check for a partially-seen fragmented message. */
5321 if (need_reasm_check) {
5324 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5325 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5326 /* Restore the next to deliver unless we are ahead */
5327 strmin->last_mid_delivered = mid;
5330 /* Left the front Partial one on */
5333 need_reasm_check = 0;
5336 * now we must deliver things in queue the normal way if any are
/* Pass 2: deliver strictly consecutive MIDs past the delivery point. */
5339 mid = strmin->last_mid_delivered + 1;
5340 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5341 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5342 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5343 /* this is deliverable now */
5344 if (control->on_strm_q) {
5345 if (control->on_strm_q == SCTP_ON_ORDERED) {
5346 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5347 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5348 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5351 panic("strmin: %p ctl: %p unknown %d",
5352 strmin, control, control->on_strm_q);
5355 control->on_strm_q = 0;
5357 /* subtract pending on streams */
5358 if (asoc->size_on_all_streams >= control->length) {
5359 asoc->size_on_all_streams -= control->length;
5362 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5364 asoc->size_on_all_streams = 0;
5367 sctp_ucount_decr(asoc->cnt_on_all_streams);
5368 /* deliver it to at least the delivery-q */
/* Advance the delivery point before handing the message up. */
5369 strmin->last_mid_delivered = control->mid;
5370 if (stcb->sctp_socket) {
5371 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5372 sctp_add_to_readq(stcb->sctp_ep, stcb,
5374 &stcb->sctp_socket->so_rcv, 1,
5375 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5378 mid = strmin->last_mid_delivered + 1;
5380 /* Its a fragmented message */
5381 if (control->first_frag_seen) {
5383 * Make it so this is next to
5386 strmin->last_mid_delivered = control->mid - 1;
5387 need_reasm_check = 1;
5395 if (need_reasm_check) {
5396 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * Flush the reassembly state for one (stream, MID) in response to a
 * FORWARD-TSN: drop queued fragments the peer has forsaken and free
 * the control block if none of it ever reached the read queue.
 * For pre-I-DATA unordered data, only fragments with TSN <= cumtsn
 * are purged; newer fragments are kept and restarted.
 */
5403 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5404 struct sctp_association *asoc,
5405 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5407 struct sctp_queued_to_read *control;
5408 struct sctp_stream_in *strm;
5409 struct sctp_tmit_chunk *chk, *nchk;
5410 int cnt_removed = 0;
5413 * For now large messages held on the stream reasm that are complete
5414 * will be tossed too. We could in theory do more work to spin
5415 * through and stop after dumping one msg aka seeing the start of a
5416 * new msg at the head, and call the delivery function... to see if
5417 * it can be delivered... But for now we just dump everything on the
5420 strm = &asoc->strmin[stream];
5421 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
/* Nothing queued for this (stream, MID): nothing to flush. */
5422 if (control == NULL) {
/* Legacy unordered data newer than the cum-TSN is not forsaken. */
5426 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5429 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5430 /* Purge hanging chunks */
5431 if (!asoc->idata_supported && (ordered == 0)) {
5432 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5437 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* Keep the reassembly byte count from underflowing. */
5438 if (asoc->size_on_reasm_queue >= chk->send_size) {
5439 asoc->size_on_reasm_queue -= chk->send_size;
5442 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5444 asoc->size_on_reasm_queue = 0;
5447 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5449 sctp_m_freem(chk->data);
5452 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * Fragments survived the purge (old unordered data past cumtsn):
 * reset the control block and restart it from the first fragment.
 */
5454 if (!TAILQ_EMPTY(&control->reasm)) {
5455 /* This has to be old data, unordered */
5456 if (control->data) {
5457 sctp_m_freem(control->data);
5458 control->data = NULL;
5460 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5461 chk = TAILQ_FIRST(&control->reasm);
5462 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5463 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5464 sctp_add_chk_to_control(control, strm, stcb, asoc,
5465 chk, SCTP_READ_LOCK_HELD);
5467 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Fully forsaken: unlink the control from whichever stream queue holds it. */
5470 if (control->on_strm_q == SCTP_ON_ORDERED) {
5471 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5472 if (asoc->size_on_all_streams >= control->length) {
5473 asoc->size_on_all_streams -= control->length;
5476 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5478 asoc->size_on_all_streams = 0;
5481 sctp_ucount_decr(asoc->cnt_on_all_streams);
5482 control->on_strm_q = 0;
5483 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5484 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5485 control->on_strm_q = 0;
5487 } else if (control->on_strm_q) {
5488 panic("strm: %p ctl: %p unknown %d",
5489 strm, control, control->on_strm_q);
5492 control->on_strm_q = 0;
/* Only free the readq entry if it never made it to the read queue. */
5493 if (control->on_read_q == 0) {
5494 sctp_free_remote_addr(control->whoFrom);
5495 if (control->data) {
5496 sctp_m_freem(control->data);
5497 control->data = NULL;
5499 sctp_free_a_readq(stcb, control);
/*
 * Receiver-side handling of a PR-SCTP FORWARD-TSN chunk: advance the
 * local cumulative TSN, fix up the mapping arrays, flush forsaken
 * reassembly state and kick the per-stream re-ordering queues.
 * On a protocol violation the association is aborted; *abort_flag is
 * presumably set by sctp_abort_an_association -- confirm in its
 * implementation.
 */
5504 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5505 struct sctp_forward_tsn_chunk *fwd,
5506 int *abort_flag, struct mbuf *m, int offset)
5508 /* The pr-sctp fwd tsn */
5510 * here we will perform all the data receiver side steps for
5511 * processing FwdTSN, as required in by pr-sctp draft:
5513 * Assume we get FwdTSN(x):
5515 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5516 * + others we have 3) examine and update re-ordering queue on
5517 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5518 * report where we are.
5520 struct sctp_association *asoc;
5521 uint32_t new_cum_tsn, gap;
5522 unsigned int i, fwd_sz, m_size;
5524 struct sctp_stream_in *strm;
5525 struct sctp_queued_to_read *control, *sv;
/* Sanity-check the wire chunk length before touching any fields. */
5528 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5529 SCTPDBG(SCTP_DEBUG_INDATA1,
5530 "Bad size too small/big fwd-tsn\n");
/* Number of TSN slots in the mapping array (bytes * 8). */
5533 m_size = (stcb->asoc.mapping_array_size << 3);
5534 /*************************************************************/
5535 /* 1. Here we update local cumTSN and shift the bitmap array */
5536 /*************************************************************/
5537 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
/* A FWD-TSN at or behind our cum-TSN carries no new information. */
5539 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5540 /* Already got there ... */
5544 * now we know the new TSN is more advanced, let's find the actual
5547 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5548 asoc->cumulative_tsn = new_cum_tsn;
/* Jump lands beyond the mapping array: attack check, else reset the maps. */
5549 if (gap >= m_size) {
5550 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5551 struct mbuf *op_err;
5552 char msg[SCTP_DIAG_INFO_LEN];
5555 * out of range (of single byte chunks in the rwnd I
5556 * give out). This must be an attacker.
5559 SCTP_SNPRINTF(msg, sizeof(msg),
5560 "New cum ack %8.8x too high, highest TSN %8.8x",
5561 new_cum_tsn, asoc->highest_tsn_inside_map);
5562 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5563 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5564 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5567 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
/* Whole window skipped: restart both maps at new_cum_tsn + 1. */
5569 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5570 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5571 asoc->highest_tsn_inside_map = new_cum_tsn;
5573 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5574 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5576 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5577 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Gap fits in the map: mark every skipped TSN present in the nr-map. */
5580 SCTP_TCB_LOCK_ASSERT(stcb);
5581 for (i = 0; i <= gap; i++) {
5582 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5583 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5584 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5585 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5586 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5591 /*************************************************************/
5592 /* 2. Clear up re-assembly queue */
5593 /*************************************************************/
5595 /* This is now done as part of clearing up the stream/seq */
5596 if (asoc->idata_supported == 0) {
5599 /* Flush all the un-ordered data based on cum-tsn */
5600 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5601 for (sid = 0; sid < asoc->streamincnt; sid++) {
5602 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5604 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5606 /*******************************************************/
5607 /* 3. Update the PR-stream re-ordering queues and fix */
5608 /* delivery issues as needed. */
5609 /*******************************************************/
5610 fwd_sz -= sizeof(*fwd);
5613 unsigned int num_str;
5614 uint32_t mid, cur_mid;
5616 uint16_t ordered, flags;
5617 struct sctp_strseq *stseq, strseqbuf;
5618 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5620 offset += sizeof(*fwd);
5622 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* I-DATA uses the larger strseq_mid entries; legacy uses strseq. */
5623 if (asoc->idata_supported) {
5624 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5626 num_str = fwd_sz / sizeof(struct sctp_strseq);
5628 for (i = 0; i < num_str; i++) {
5629 if (asoc->idata_supported) {
5630 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5631 sizeof(struct sctp_strseq_mid),
5632 (uint8_t *)&strseqbuf_m);
5633 offset += sizeof(struct sctp_strseq_mid);
5634 if (stseq_m == NULL) {
5637 sid = ntohs(stseq_m->sid);
5638 mid = ntohl(stseq_m->mid);
5639 flags = ntohs(stseq_m->flags);
5640 if (flags & PR_SCTP_UNORDERED_FLAG) {
5646 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5647 sizeof(struct sctp_strseq),
5648 (uint8_t *)&strseqbuf);
5649 offset += sizeof(struct sctp_strseq);
5650 if (stseq == NULL) {
5653 sid = ntohs(stseq->sid);
5654 mid = (uint32_t)ntohs(stseq->ssn);
5662 * Ok we now look for the stream/seq on the read
5663 * queue where its not all delivered. If we find it
5664 * we transmute the read entry into a PDI_ABORTED.
5666 if (sid >= asoc->streamincnt) {
5667 /* screwed up streams, stop! */
5670 if ((asoc->str_of_pdapi == sid) &&
5671 (asoc->ssn_of_pdapi == mid)) {
5673 * If this is the one we were partially
5674 * delivering now then we no longer are.
5675 * Note this will change with the reassembly
5678 asoc->fragmented_delivery_inprogress = 0;
/* Flush every MID up to the forsaken one for this stream. */
5680 strm = &asoc->strmin[sid];
5681 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5682 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5684 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5685 if ((control->sinfo_stream == sid) &&
5686 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5687 str_seq = (sid << 16) | (0x0000ffff & mid);
5688 control->pdapi_aborted = 1;
/* Save/restore control_pdapi around the ULP notification. */
5689 sv = stcb->asoc.control_pdapi;
5690 control->end_added = 1;
5691 if (control->on_strm_q == SCTP_ON_ORDERED) {
5692 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5693 if (asoc->size_on_all_streams >= control->length) {
5694 asoc->size_on_all_streams -= control->length;
5697 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5699 asoc->size_on_all_streams = 0;
5702 sctp_ucount_decr(asoc->cnt_on_all_streams);
5703 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5704 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5706 } else if (control->on_strm_q) {
5707 panic("strm: %p ctl: %p unknown %d",
5708 strm, control, control->on_strm_q);
5711 control->on_strm_q = 0;
5712 stcb->asoc.control_pdapi = control;
5713 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5715 SCTP_PARTIAL_DELIVERY_ABORTED,
5717 SCTP_SO_NOT_LOCKED);
5718 stcb->asoc.control_pdapi = sv;
5720 } else if ((control->sinfo_stream == sid) &&
5721 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5722 /* We are past our victim SSN */
5726 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5727 /* Update the sequence number */
5728 strm->last_mid_delivered = mid;
5730 /* now kick the stream the new way */
5731 /* sa_ignore NO_NULL_CHK */
5732 sctp_kick_prsctp_reorder_queue(stcb, strm);
5734 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5737 * Now slide thing forward.
5739 sctp_slide_mapping_arrays(stcb);