/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is), so the SACK can be bundled with the outgoing data.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock);
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0;

    /*
     * This is really set wrong with respect to a 1-to-many socket,
     * since sb_cc is the count that everyone has put up. When we
     * rewrite sctp_soreceive, we will fix this so that ONLY this
     * association's data is taken into account.
     */
    if (stcb->sctp_socket == NULL) {
        return (calc);
    }

    KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
    KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
        ("size_on_all_streams is %u", asoc->size_on_all_streams));
    if (stcb->asoc.sb_cc == 0 &&
        asoc->cnt_on_reasm_queue == 0 &&
        asoc->cnt_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
    /*
     * Take out what has NOT been put on the socket queue and which we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
        asoc->cnt_on_reasm_queue * MSIZE));
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
        asoc->cnt_on_all_streams * MSIZE));
    if (calc == 0) {
        /* out of space */
        return (calc);
    }
    /* what is the overhead of all these rwnd's */
    calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    /*
     * If the window gets too small due to control overhead, reduce it
     * to 1, even if it is 0: silly window syndrome (SWS) avoidance.
     */
    if (calc < stcb->asoc.my_rwnd_control_len) {
        calc = 1;
    }
    return (calc);
}
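/*
 * Worked example for the calculation above (hypothetical numbers, not
 * taken from a real trace): with a receive buffer limit of 64000 bytes,
 * 1000 bytes held in two chunks on the reassembly queue and nothing else
 * pending, the window advertised is roughly
 *
 *     64000 - (1000 + 2 * MSIZE) - my_rwnd_control_len
 *
 * i.e. every mbuf still held is charged MSIZE of overhead on top of its
 * payload before the control-data overhead is subtracted.
 */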
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
    read_queue_e->sinfo_stream = sid;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = context;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->mid = mid;
    read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
    TAILQ_INIT(&read_queue_e->reasm);
    read_queue_e->whoFrom = net;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
        read_queue_e->do_not_ref_stcb = 1;
    }
failed_build:
    return (read_queue_e);
}
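/*
 * Note on the (flags << 8) above: sinfo_flags carries the wire-level
 * chunk flags (SCTP_DATA_UNORDERED, SCTP_DATA_NOT_FRAG, ...) in its
 * upper byte, which is why code throughout this file recovers them with
 * (sinfo_flags >> 8). The 0xffffffff written to top_fsn/fsn_included is
 * simply a sentinel for "no fragment seen yet".
 */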
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_extrcvinfo *seinfo;
    struct sctp_sndrcvinfo *outinfo;
    struct sctp_rcvinfo *rcvinfo;
    struct sctp_nxtinfo *nxtinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended;
    int provide_nxt;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
        /* user does not want any ancillary data */
        return (NULL);
    }

    len = 0;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    seinfo = (struct sctp_extrcvinfo *)sinfo;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
        (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
        provide_nxt = 1;
        len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    } else {
        provide_nxt = 0;
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
            use_extended = 1;
            len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            use_extended = 0;
            len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    } else {
        use_extended = 0;
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    SCTP_BUF_LEN(ret) = 0;

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    /*
     * Make sure that there is no un-initialized padding between the
     * cmsg header and cmsg data and after the cmsg data.
     */
    memset(cmh, 0, len);
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
        cmh->cmsg_type = SCTP_RCVINFO;
        rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
        rcvinfo->rcv_sid = sinfo->sinfo_stream;
        rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
        rcvinfo->rcv_flags = sinfo->sinfo_flags;
        rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
        rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
        rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
        rcvinfo->rcv_context = sinfo->sinfo_context;
        rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    if (provide_nxt) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
        cmh->cmsg_type = SCTP_NXTINFO;
        nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
        nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
        nxtinfo->nxt_flags = 0;
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
            nxtinfo->nxt_flags |= SCTP_UNORDERED;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
            nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
            nxtinfo->nxt_flags |= SCTP_COMPLETE;
        }
        nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
        nxtinfo->nxt_length = seinfo->serinfo_next_length;
        nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
        if (use_extended) {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
            cmh->cmsg_type = SCTP_EXTRCV;
            memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            cmh->cmsg_type = SCTP_SNDRCV;
            *outinfo = *sinfo;
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    }
    return (ret);
}
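/*
 * The mbuf built above is handed to the socket layer as ancillary data.
 * A sketch of how an application would walk it (standard cmsg(3) usage,
 * not code from this file):
 *
 *     for (cmsg = CMSG_FIRSTHDR(&mhdr); cmsg != NULL;
 *         cmsg = CMSG_NXTHDR(&mhdr, cmsg)) {
 *         if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *             cmsg->cmsg_type == SCTP_RCVINFO)
 *             memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
 *     }
 */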
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
    uint32_t gap, i, cumackp1;
    int fnd = 0;
    int in_r = 0, in_nr = 0;

    if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
        return;
    }
    cumackp1 = asoc->cumulative_tsn + 1;
    if (SCTP_TSN_GT(cumackp1, tsn)) {
        /*
         * This tsn is behind the cum ack and thus we don't need to
         * worry about it being moved from one to the other.
         */
        return;
    }
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
    in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
        panic("Things are really messed up now");
#else
        SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
        sctp_print_mapping_array(asoc);
#endif
    }
    if (in_nr == 0)
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if (in_r)
        SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
        asoc->highest_tsn_inside_nr_map = tsn;
    }
    if (tsn == asoc->highest_tsn_inside_map) {
        /* We must back down to see what the new highest is. */
        for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
            SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
            if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
                asoc->highest_tsn_inside_map = i;
                fnd = 1;
                break;
            }
        }
        if (!fnd) {
            asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
        }
    }
}
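/*
 * Example of the gap math used above (hypothetical values): with
 * mapping_array_base_tsn = 0x1000 and tsn = 0x1003, SCTP_CALC_TSN_TO_GAP()
 * yields gap = 3, i.e. bit 3 of the mapping arrays. Moving that bit from
 * mapping_array to nr_mapping_array is what makes the TSN non-revokable:
 * it will be reported in NR gap reports and will not be reneged on under
 * memory pressure when sctp_do_drain is enabled.
 */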
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
    struct sctp_queued_to_read *at;
    struct sctp_readhead *q;
    uint8_t flags, unordered;

    flags = (control->sinfo_flags >> 8);
    unordered = flags & SCTP_DATA_UNORDERED;
    if (unordered) {
        q = &strm->uno_inqueue;
        if (asoc->idata_supported == 0) {
            if (!TAILQ_EMPTY(q)) {
                /* Only one stream can be here in old style -- abort */
                return (-1);
            }
            TAILQ_INSERT_TAIL(q, control, next_instrm);
            control->on_strm_q = SCTP_ON_UNORDERED;
            return (0);
        }
    } else {
        q = &strm->inqueue;
    }
    if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        control->end_added = 1;
        control->first_frag_seen = 1;
        control->last_frag_seen = 1;
    }
    if (TAILQ_EMPTY(q)) {
        /* Empty queue */
        TAILQ_INSERT_HEAD(q, control, next_instrm);
        if (unordered) {
            control->on_strm_q = SCTP_ON_UNORDERED;
        } else {
            control->on_strm_q = SCTP_ON_ORDERED;
        }
        return (0);
    } else {
        TAILQ_FOREACH(at, q, next_instrm) {
            if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * one in queue is bigger than the new one,
                 * insert before this one
                 */
                TAILQ_INSERT_BEFORE(at, control, next_instrm);
                if (unordered) {
                    control->on_strm_q = SCTP_ON_UNORDERED;
                } else {
                    control->on_strm_q = SCTP_ON_ORDERED;
                }
                break;
            } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * Gak, he sent me a duplicate msg id
                 * number?? return -1 to abort.
                 */
                return (-1);
            } else {
                if (TAILQ_NEXT(at, next_instrm) == NULL) {
                    /*
                     * We are at the end, insert it
                     * after this one.
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
                    }
                    TAILQ_INSERT_AFTER(q, at, control, next_instrm);
                    if (unordered) {
                        control->on_strm_q = SCTP_ON_UNORDERED;
                    } else {
                        control->on_strm_q = SCTP_ON_ORDERED;
                    }
                    break;
                }
            }
        }
    }
    return (0);
}
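/*
 * The SCTP_MID_GT()/SCTP_MID_EQ() comparisons above are serial-number
 * comparisons: with I-DATA the MID is a full 32-bit value, while for old
 * DATA chunks only the lower 16 bits (the SSN) are significant. As a
 * worked example (hypothetical values), with idata_supported == 0 an SSN
 * of 0x0001 compares as "greater than" 0xffff, because the 16-bit
 * sequence space is assumed to have wrapped.
 */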
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
    char msg[SCTP_DIAG_INFO_LEN];
    struct mbuf *oper;

    if (stcb->asoc.idata_supported) {
        SCTP_SNPRINTF(msg, sizeof(msg),
            "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn, chk->rec.data.mid);
    } else {
        SCTP_SNPRINTF(msg, sizeof(msg),
            "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            (uint16_t)chk->rec.data.fsn,
            (uint16_t)chk->rec.data.mid);
    }
    oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    sctp_m_freem(chk->data);
    chk->data = NULL;
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
}
static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
    /*
     * The control could not be placed and must be cleaned.
     */
    struct sctp_tmit_chunk *chk, *nchk;

    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
        TAILQ_REMOVE(&control->reasm, chk, sctp_next);
        if (chk->data) {
            sctp_m_freem(chk->data);
            chk->data = NULL;
        }
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    }
    sctp_free_remote_addr(control->whoFrom);
    if (control->data) {
        sctp_m_freem(control->data);
        control->data = NULL;
    }
    sctp_free_a_readq(stcb, control);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order, as long as
 * the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4 billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and
     * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint32_t nxt_todel;
    struct mbuf *op_err;
    struct sctp_stream_in *strm;
    char msg[SCTP_DIAG_INFO_LEN];

    strm = &asoc->strmin[control->sinfo_stream];
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
            strm->last_mid_delivered, control->mid);
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
        if (asoc->idata_supported) {
            SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
                strm->last_mid_delivered, control->sinfo_tsn,
                control->sinfo_stream, control->mid);
        } else {
            SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
                (uint16_t)strm->last_mid_delivered,
                control->sinfo_tsn,
                control->sinfo_stream,
                (uint16_t)control->mid);
        }
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return;
    }
    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    nxt_todel = strm->last_mid_delivered + 1;
    if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
        /* can be delivered right away? */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        /* EY it wont be queued if it could be delivered directly */
        queue_needed = 0;
        if (asoc->size_on_all_streams >= control->length) {
            asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
            asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_mid_delivered++;
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
            /* all delivered */
            nxt_todel = strm->last_mid_delivered + 1;
            if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
                (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
                if (control->on_strm_q == SCTP_ON_ORDERED) {
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
                } else {
                    panic("Huh control: %p is on_strm_q: %d",
                        control, control->on_strm_q);
#endif
                }
                control->on_strm_q = 0;
                strm->last_mid_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1,
                    SCTP_READ_LOCK_NOT_HELD,
                    SCTP_SO_LOCKED);
                continue;
            } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
                *need_reasm = 1;
            }
            break;
        }
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            SCTP_SNPRINTF(msg, sizeof(msg),
                "Queue to str MID: %u duplicate", control->mid);
            sctp_clean_up_control(stcb, control);
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
            *abort_flag = 1;
        }
    }
}
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
    struct mbuf *m, *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    control->held_length = 0;
    control->length = 0;
    m = control->data;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->data = sctp_m_free(m);
                m = control->data;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        if (control->on_read_q) {
            /*
             * On read queue so we must increment the SB stuff;
             * we assume the caller has done any locks of SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
    struct mbuf *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    if (stcb == NULL) {
#ifdef INVARIANTS
        panic("Control broken");
#else
        return;
#endif
    }
    if (control->tail_mbuf == NULL) {
        /* TSNH */
        sctp_m_freem(control->data);
        control->data = m;
        sctp_setup_tail_pointer(control);
        return;
    }
    control->tail_mbuf->m_next = m;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->tail_mbuf->m_next = sctp_m_free(m);
                m = control->tail_mbuf->m_next;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        if (control->on_read_q) {
            /*
             * On read queue so we must increment the SB stuff;
             * we assume the caller has done any locks of SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        *added += SCTP_BUF_LEN(m);
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}
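/*
 * Design note on the two helpers above: keeping tail_mbuf current is what
 * makes appending a fragment O(1). Without it, every merge done by
 * sctp_add_chk_to_control() would have to walk the whole mbuf chain of a
 * partially reassembled message just to find its end.
 */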
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
    memset(nc, 0, sizeof(struct sctp_queued_to_read));
    nc->sinfo_stream = control->sinfo_stream;
    nc->mid = control->mid;
    TAILQ_INIT(&nc->reasm);
    nc->top_fsn = control->top_fsn;
    nc->sinfo_flags = control->sinfo_flags;
    nc->sinfo_ppid = control->sinfo_ppid;
    nc->sinfo_context = control->sinfo_context;
    nc->fsn_included = 0xffffffff;
    nc->sinfo_tsn = control->sinfo_tsn;
    nc->sinfo_cumtsn = control->sinfo_cumtsn;
    nc->sinfo_assoc_id = control->sinfo_assoc_id;
    nc->whoFrom = control->whoFrom;
    atomic_add_int(&nc->whoFrom->ref_count, 1);
    nc->stcb = control->stcb;
    nc->port_from = control->port_from;
    nc->do_not_ref_stcb = control->do_not_ref_stcb;
}
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
    control->fsn_included = tsn;
    if (control->on_read_q) {
        /*
         * We have to purge it from there; hopefully this will work.
         */
        TAILQ_REMOVE(&inp->read_queue, control, next);
        control->on_read_q = 0;
    }
}
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
    /*
     * Special handling for the old un-ordered data chunk. All the
     * chunks/TSN's go to mid 0. So we have to do the old style watching
     * to see if we have it all. If you return one, no other control
     * entries on the un-ordered queue will be looked at. In theory
     * there should be no other entries in reality, unless the guy is
     * sending both unordered NDATA and unordered DATA...
     */
    struct sctp_tmit_chunk *chk, *lchk, *tchk;
    uint32_t fsn;
    struct sctp_queued_to_read *nc;
    int cnt_added;

    if (control->first_frag_seen == 0) {
        /* Nothing we can do, we have not seen the first piece yet */
        return (1);
    }
    /* Collapse any we can */
    cnt_added = 0;
restart:
    fsn = control->fsn_included + 1;
    /* Now what can we add? */
    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
        if (chk->rec.data.fsn == fsn) {
            /* Ok lets add it */
            sctp_alloc_a_readq(stcb, nc);
            if (nc == NULL) {
                break;
            }
            memset(nc, 0, sizeof(struct sctp_queued_to_read));
            TAILQ_REMOVE(&control->reasm, chk, sctp_next);
            sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
            fsn++;
            cnt_added++;
            chk = NULL;
            if (control->end_added) {
                /* We are done */
                if (!TAILQ_EMPTY(&control->reasm)) {
                    /*
                     * Ok we have to move anything left
                     * on the control queue to a new
                     * control.
                     */
                    sctp_build_readq_entry_from_ctl(nc, control);
                    tchk = TAILQ_FIRST(&control->reasm);
                    if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        if (asoc->size_on_reasm_queue >= tchk->send_size) {
                            asoc->size_on_reasm_queue -= tchk->send_size;
                        } else {
#ifdef INVARIANTS
                            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
                            asoc->size_on_reasm_queue = 0;
#endif
                        }
                        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
                        nc->first_frag_seen = 1;
                        nc->fsn_included = tchk->rec.data.fsn;
                        nc->data = tchk->data;
                        nc->sinfo_ppid = tchk->rec.data.ppid;
                        nc->sinfo_tsn = tchk->rec.data.tsn;
                        sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
                        tchk->data = NULL;
                        sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
                        sctp_setup_tail_pointer(nc);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /* Spin the rest onto the queue */
                    while (tchk) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /*
                     * Now lets add it to the queue
                     * after removing control.
                     */
                    TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
                    nc->on_strm_q = SCTP_ON_UNORDERED;
                    if (control->on_strm_q) {
                        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                        control->on_strm_q = 0;
                    }
                }
                if (control->pdapi_started) {
                    strm->pd_api_started = 0;
                    control->pdapi_started = 0;
                }
                if (control->on_strm_q) {
                    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                    control->on_strm_q = 0;
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                }
                if (control->on_read_q == 0) {
                    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                        &stcb->sctp_socket->so_rcv, control->end_added,
                        inp_read_lock_held, SCTP_SO_NOT_LOCKED);
                }
                sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
                if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
                    /*
                     * Switch to the new guy and
                     * continue.
                     */
                    control = nc;
                    goto restart;
                } else {
                    if (nc->on_strm_q == 0) {
                        sctp_free_a_readq(stcb, nc);
                    }
                }
                return (1);
            } else {
                sctp_free_a_readq(stcb, nc);
            }
        } else {
            /* Can't add more */
            break;
        }
    }
    if (cnt_added && strm->pd_api_started) {
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    }
    if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
        strm->pd_api_started = 1;
        control->pdapi_started = 1;
        sctp_add_to_readq(stcb->sctp_ep, stcb, control,
            &stcb->sctp_socket->so_rcv, control->end_added,
            inp_read_lock_held, SCTP_SO_NOT_LOCKED);
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
        return (0);
    } else {
        return (1);
    }
}
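/*
 * Scenario handled above (hypothetical TSNs): a peer using old-style DATA
 * sends one unordered message fragmented as TSN 10 (B bit), 11 and 12 (E
 * bit). All three land on the single MID-0 control; as each in-sequence
 * FSN is merged, fsn_included advances 10 -> 11 -> 12, and once the E-bit
 * fragment is merged end_added is set and the message is pushed to the
 * read queue. Anything still on the reasm list at that point belongs to
 * the *next* unordered message and is moved to a fresh control.
 */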
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
    struct sctp_tmit_chunk *at;
    int inserted;

    /*
     * Here we need to place the chunk into the control structure sorted
     * in the correct order.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* Its the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn);
        at = TAILQ_FIRST(&control->reasm);
        if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
            /*
             * The first chunk in the reassembly is a smaller
             * TSN than this one; even though this has a first,
             * it must be from a subsequent msg.
             */
            goto place_chunk;
        }
        if (control->first_frag_seen) {
            /*
             * In old un-ordered we can reassemble multiple
             * messages on one control, as long as the next
             * FIRST is greater than the old first (TSN, i.e.
             * FSN, wise).
             */
            struct mbuf *tdata;
            uint32_t tmp;

            if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
                /*
                 * Easy way: the start of a new guy beyond
                 * the lowest.
                 */
                goto place_chunk;
            }
            if ((chk->rec.data.fsn == control->fsn_included) ||
                (control->pdapi_started)) {
                /*
                 * Ok this should not happen; if it does we
                 * started the pd-api on the higher TSN
                 * (since the equals part is a TSN failure
                 * it must be that).
                 *
                 * We are completely hosed in that case since
                 * I have no way to recover. This really
                 * will only happen if we can get more TSN's
                 * higher before the pd-api-point.
                 */
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
                return;
            }
            /*
             * Ok we have two firsts and the one we just got is
             * smaller than the one we previously placed.. yuck!
             * We must swap them out.
             */
            /* swap the mbufs */
            tdata = control->data;
            control->data = chk->data;
            chk->data = tdata;
            /* Save the lengths */
            chk->send_size = control->length;
            /* Recompute length of control and tail pointer */
            sctp_setup_tail_pointer(control);
            /* Fix the FSN included */
            tmp = control->fsn_included;
            control->fsn_included = chk->rec.data.fsn;
            chk->rec.data.fsn = tmp;
            /* Fix the TSN included */
            tmp = control->sinfo_tsn;
            control->sinfo_tsn = chk->rec.data.tsn;
            chk->rec.data.tsn = tmp;
            /* Fix the PPID included */
            tmp = control->sinfo_ppid;
            control->sinfo_ppid = chk->rec.data.ppid;
            chk->rec.data.ppid = tmp;
            /* Fix tail pointer */
            goto place_chunk;
        }
        control->first_frag_seen = 1;
        control->fsn_included = chk->rec.data.fsn;
        control->top_fsn = chk->rec.data.fsn;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        return;
    }
place_chunk:
    inserted = 0;
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
        if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
            /*
             * This one in queue is bigger than the new one,
             * insert the new one before at.
             */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            inserted = 1;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.fsn == chk->rec.data.fsn) {
            /*
             * They sent a duplicate fsn number. This really
             * should not happen since the FSN is a TSN and it
             * should have been dropped earlier.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
            return;
        }
    }
    if (inserted == 0) {
        /* Its at the end */
        asoc->size_on_reasm_queue += chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        control->top_fsn = chk->rec.data.fsn;
        TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
    }
}
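/*
 * Example of the "two firsts" swap above (hypothetical TSNs): fragments
 * of message A (first fragment TSN 5) and message B (first fragment TSN
 * 9) both target MID 0. If B's first arrives before A's, the control
 * holds TSN 9; when TSN 5 shows up, the mbuf chains, FSN, TSN and PPID
 * are swapped so the control again describes the earlier message, and
 * the TSN-9 fragment is re-queued like any out-of-order piece.
 */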
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
    /*
     * Given a stream, strm, see if any of the SSN's on it that are
     * fragmented are ready to deliver. If so go ahead and place them on
     * the read queue. In so placing, if we have hit the end, then we
     * need to remove them from the stream's queue.
     */
    struct sctp_queued_to_read *control, *nctl = NULL;
    uint32_t next_to_del;
    uint32_t pd_point;
    int done;

    if (stcb->sctp_socket) {
        pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
            stcb->sctp_ep->partial_delivery_point);
    } else {
        pd_point = stcb->sctp_ep->partial_delivery_point;
    }
    control = TAILQ_FIRST(&strm->uno_inqueue);

    if ((control != NULL) &&
        (asoc->idata_supported == 0)) {
        /* Special handling needed for "old" data format */
        if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
            goto done_un;
        }
    }
    if (strm->pd_api_started) {
        /* Can't add more */
        return;
    }
    while (control) {
        SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
            control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (control->end_added) {
            /* We just put the last bit on */
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_UNORDERED) {
                    panic("Huh control: %p on_q: %d -- not unordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
        } else {
            /* Can we do a PD-API for this un-ordered guy? */
            if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
                strm->pd_api_started = 1;
                control->pdapi_started = 1;
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
                break;
            }
        }
        control = nctl;
    }
done_un:
    control = TAILQ_FIRST(&strm->inqueue);
    if (strm->pd_api_started) {
        /* Can't add more */
        return;
    }
    if (control == NULL) {
        return;
    }
    if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
        /*
         * Ok the guy at the top was being partially delivered
         * completed, so we remove it. Note the pd_api flag was
         * taken off when the chunk was merged on in
         * sctp_queue_data_for_reasm below.
         */
        nctl = TAILQ_NEXT(control, next_instrm);
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
            control, control->end_added, control->mid,
            control->top_fsn, control->fsn_included,
            strm->last_mid_delivered);
        if (control->end_added) {
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_ORDERED) {
                    panic("Huh control: %p on_q: %d -- not ordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
            }
            if (strm->pd_api_started && control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            control = nctl;
        }
    }
    if (strm->pd_api_started) {
        /*
         * Can't add more; must have gotten an un-ordered above being
         * partially delivered.
         */
        return;
    }
deliver_more:
    next_to_del = strm->last_mid_delivered + 1;
    if (control) {
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
            control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
            next_to_del);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
            (control->first_frag_seen)) {
            /* Ok we can deliver it onto the stream. */
            if (control->end_added) {
                /* We are done with it afterwards */
                if (control->on_strm_q) {
#ifdef INVARIANTS
                    if (control->on_strm_q != SCTP_ON_ORDERED) {
                        panic("Huh control: %p on_q: %d -- not ordered?",
                            control, control->on_strm_q);
                    }
#endif
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    control->on_strm_q = 0;
                }
            }
            if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
                /*
                 * A singleton now slipping through - mark
                 * it non-revokable too
                 */
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
            } else if (control->end_added == 0) {
                /*
                 * Check if we can defer adding until its
                 * all there
                 */
                if ((control->length < pd_point) || (strm->pd_api_started)) {
                    /*
                     * Don't need it or cannot add more
                     * (one being delivered that way)
                     */
                    goto out;
                }
            }
            done = (control->end_added) && (control->last_frag_seen);
            if (control->on_read_q == 0) {
                if (!done) {
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    strm->pd_api_started = 1;
                    control->pdapi_started = 1;
                }
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            strm->last_mid_delivered = next_to_del;
            if (done) {
                control = nctl;
                goto deliver_more;
            }
        }
    }
out:
    return;
}
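/*
 * Worked example for pd_point (hypothetical numbers, including the shift
 * value): with a receive buffer limit of 64000 bytes, a partial delivery
 * shift of 2 and partial_delivery_point set to 8000, pd_point =
 * min(64000 >> 2, 8000) = 8000. A partially reassembled message starts
 * being handed to the reader once at least that many bytes are in place,
 * instead of waiting for the complete message.
 */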
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
    /*
     * Given a control and a chunk, merge the data from the chk onto the
     * control and free up the chunk resources.
     */
    uint32_t added = 0;
    int i_locked = 0;

    if (control->on_read_q && (hold_rlock == 0)) {
        /*
         * Its being pd-api'd so we must do some locks.
         */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        i_locked = 1;
    }
    if (control->data == NULL) {
        control->data = chk->data;
        sctp_setup_tail_pointer(control);
    } else {
        sctp_add_to_tail_pointer(control, chk->data, &added);
    }
    control->fsn_included = chk->rec.data.fsn;
    asoc->size_on_reasm_queue -= chk->send_size;
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
    chk->data = NULL;
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        control->first_frag_seen = 1;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
    }
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        /* Its complete */
        if ((control->on_strm_q) && (control->on_read_q)) {
            if (control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_strm_q == SCTP_ON_UNORDERED) {
                /* Unordered */
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                control->on_strm_q = 0;
            } else if (control->on_strm_q == SCTP_ON_ORDERED) {
                /* Ordered */
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                /*
                 * Don't need to decrement
                 * size_on_all_streams, since control is on
                 * the read queue.
                 */
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
#ifdef INVARIANTS
            } else if (control->on_strm_q) {
                panic("Unknown state on ctrl: %p on_strm_q: %d", control,
                    control->on_strm_q);
#endif
            }
        }
        control->end_added = 1;
        control->last_frag_seen = 1;
    }
    if (i_locked) {
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    return (added);
}
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
    uint32_t next_fsn;
    struct sctp_tmit_chunk *at, *nat;
    struct sctp_stream_in *strm;
    int do_wakeup, unordered;
    uint32_t lenadded;

    strm = &asoc->strmin[control->sinfo_stream];
    /*
     * For old un-ordered data chunks.
     */
    if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
        unordered = 1;
    } else {
        unordered = 0;
    }
    /* Must be added to the stream-in queue */
    if (created_control) {
        if ((unordered == 0) || (asoc->idata_supported)) {
            sctp_ucount_incr(asoc->cnt_on_all_streams);
        }
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            /* Duplicate SSN? */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
            sctp_clean_up_control(stcb, control);
            return;
        }
        if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
            /*
             * Ok we created this control and now lets validate
             * that its legal, i.e. there is a B bit set; if not
             * and we have up to the cum-ack then its invalid.
             */
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
                return;
            }
        }
    }
    if ((asoc->idata_supported == 0) && (unordered == 1)) {
        sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
        return;
    }
    /*
     * Ok we must queue the chunk into the reassembly portion: o if its
     * the first it goes to the control mbuf. o if its not first but the
     * next in sequence it goes to the control, and each succeeding one
     * in order also goes. o if its not in order we place it on the list
     * in its place.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* Its the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn);
        if (control->first_frag_seen) {
            /*
             * Error on senders part; they either sent us two
             * data chunks with FIRST, or they sent two
             * un-ordered chunks that were fragmented at the
             * same time in the same stream.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
            return;
        }
        control->first_frag_seen = 1;
        control->sinfo_ppid = chk->rec.data.ppid;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->fsn_included = chk->rec.data.fsn;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        asoc->size_on_all_streams += control->length;
    } else {
        /* Place the chunk in our list */
        int inserted = 0;

        if (control->last_frag_seen == 0) {
            /* Still willing to raise highest FSN seen */
            if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "We have a new top_fsn: %u\n",
                    chk->rec.data.fsn);
                control->top_fsn = chk->rec.data.fsn;
            }
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "The last fsn is now in place fsn: %u\n",
                    chk->rec.data.fsn);
                control->last_frag_seen = 1;
                if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "New fsn: %u is not at top_fsn: %u -- abort\n",
                        chk->rec.data.fsn,
                        control->top_fsn);
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
                    return;
                }
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check since we know
                 * that the first fragment is 0. For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */
                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
                    /*
                     * We have already delivered up to
                     * this, so its a dup.
                     */
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
                    return;
                }
            }
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                /* Second last? huh? */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate last fsn: %u (top: %u) -- abort\n",
                    chk->rec.data.fsn, control->top_fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                return;
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check since we know
                 * that the first fragment is 0. For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */
                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
                    /*
                     * We have already delivered up to
                     * this, so its a dup.
                     */
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "New fsn: %u is already seen in included_fsn: %u -- abort\n",
                        chk->rec.data.fsn, control->fsn_included);
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                    return;
                }
            }
            /*
             * Validate not beyond top FSN if we have seen the
             * last one.
             */
            if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
                    chk->rec.data.fsn,
                    control->top_fsn);
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                return;
            }
        }
        /*
         * If we reach here, we need to place the new chunk in the
         * reassembly for this control.
         */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a not first fsn: %u needs to be inserted\n",
            chk->rec.data.fsn);
        TAILQ_FOREACH(at, &control->reasm, sctp_next) {
            if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
                /*
                 * This one in queue is bigger than the new
                 * one, insert the new one before at.
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Insert it before fsn: %u\n",
                    at->rec.data.fsn);
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_BEFORE(at, chk, sctp_next);
                inserted = 1;
                break;
            } else if (at->rec.data.fsn == chk->rec.data.fsn) {
                /*
                 * Gak, he sent me a duplicate str seq
                 * number.
                 */
                /*
                 * foo bar, I guess I will just free this
                 * new guy, should we abort too? FIX ME
                 * MAYBE? Or it COULD be that the SSN's have
                 * wrapped. Maybe I should compare to TSN
                 * somehow... sigh for now just blow away
                 * the chunk!
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate to fsn: %u -- abort\n",
                    at->rec.data.fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
                return;
            }
        }
        if (inserted == 0) {
            /* Goes on the end */
            SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
                chk->rec.data.fsn);
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
        }
    }
    /*
     * Ok lets see if we can suck any up into the control structure that
     * are in seq if it makes sense.
     */
    do_wakeup = 0;
    /*
     * If the first fragment has not been seen there is no sense in
     * looking.
     */
    if (control->first_frag_seen) {
        next_fsn = control->fsn_included + 1;
        TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
            if (at->rec.data.fsn == next_fsn) {
                /* We can add this one now to the control */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
                    control, at,
                    at->rec.data.fsn,
                    next_fsn, control->fsn_included);
                TAILQ_REMOVE(&control->reasm, at, sctp_next);
                lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
                if (control->on_read_q) {
                    do_wakeup = 1;
                } else {
                    /*
                     * We only add to the
                     * size-on-all-streams if its not on
                     * the read q. The read q flag will
                     * cause a sballoc so its accounted
                     * for there.
                     */
                    asoc->size_on_all_streams += lenadded;
                }
                next_fsn++;
                if (control->end_added && control->pdapi_started) {
                    if (strm->pd_api_started) {
                        strm->pd_api_started = 0;
                        control->pdapi_started = 0;
                    }
                    if (control->on_read_q == 0) {
                        sctp_add_to_readq(stcb->sctp_ep, stcb,
                            control,
                            &stcb->sctp_socket->so_rcv, control->end_added,
                            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
                    }
                    break;
                }
            } else {
                break;
            }
        }
    }
    if (do_wakeup) {
        /* Need to wakeup the reader */
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    }
}
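/*
 * Example of the in-order merge loop above (hypothetical FSNs): if the
 * control has fsn_included = 4 and the reasm list holds FSNs 5, 6 and 9,
 * fragments 5 and 6 are pulled off and merged (fsn_included becomes 6),
 * while 9 stays queued until 7 and 8 arrive.
 */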
static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
    struct sctp_queued_to_read *control;

    if (ordered) {
        TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
            if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
                break;
            }
        }
    } else {
        if (idata_supported) {
            TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
                if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
                    break;
                }
            }
        } else {
            control = TAILQ_FIRST(&strm->uno_inqueue);
        }
    }
    return (control);
}
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chk_type)
{
    struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
    uint32_t tsn, fsn, gap, mid;
    struct mbuf *dmbuf;
    int the_len;
    int need_reasm_check = 0;
    uint16_t sid;
    struct mbuf *op_err;
    char msg[SCTP_DIAG_INFO_LEN];
    struct sctp_queued_to_read *control, *ncontrol;
    uint32_t ppid;
    uint8_t chk_flags;
    struct sctp_stream_reset_list *liste;
    int ordered;
    size_t clen;
    int created_control = 0;

    if (chk_type == SCTP_IDATA) {
        struct sctp_idata_chunk *chunk, chunk_buf;

        chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
        chk_flags = chunk->ch.chunk_flags;
        clen = sizeof(struct sctp_idata_chunk);
        tsn = ntohl(chunk->dp.tsn);
        sid = ntohs(chunk->dp.sid);
        mid = ntohl(chunk->dp.mid);
        if (chk_flags & SCTP_DATA_FIRST_FRAG) {
            fsn = 0;
            ppid = chunk->dp.ppid_fsn.ppid;
        } else {
            fsn = ntohl(chunk->dp.ppid_fsn.fsn);
            ppid = 0xffffffff;	/* Use as an invalid value. */
        }
    } else {
        struct sctp_data_chunk *chunk, chunk_buf;

        chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
        chk_flags = chunk->ch.chunk_flags;
        clen = sizeof(struct sctp_data_chunk);
        tsn = ntohl(chunk->dp.tsn);
        sid = ntohs(chunk->dp.sid);
        mid = (uint32_t)(ntohs(chunk->dp.ssn));
        fsn = tsn;
        ppid = chunk->dp.ppid;
    }
    if ((size_t)chk_length == clen) {
        /*
         * Need to send an abort since we had an empty data chunk.
         */
        op_err = sctp_generate_no_user_data_cause(tsn);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
        asoc->send_sack = 1;
    }
    ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
    }
    if (stcb == NULL) {
        return (0);
    }
    SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /* Calculate the number of TSN's between the base and this TSN */
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
        return (0);
    }
    if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_expand_mapping_array(asoc, gap)) {
            /* Can't expand, drop it */
            return (0);
        }
    }
    if (SCTP_TSN_GT(tsn, *high_tsn)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
        SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /*
     * Check to see about the GONE flag; duplicates would cause a sack
     * to be sent up above.
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
        /*
         * wait a minute, this guy is gone, there is no longer a
         * receiver. Send peer an ABORT!
         */
        op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room. If NOT then we
     * MAY let one through only IF this TSN is the one we are waiting
     * for on a partial delivery API.
     */

    /* Is the stream valid? */
    if (sid >= asoc->streamincnt) {
        struct sctp_error_invalid_stream *cause;

        op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
            0, M_NOWAIT, 1, MT_DATA);
        if (op_err != NULL) {
            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
            cause = mtod(op_err, struct sctp_error_invalid_stream *);
            /*
             * Error causes are just param's and this one has
             * two back to back phdr, one with the error type
             * and size, the other with the streamid and a rsvd.
             */
            SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
            cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
            cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
            cause->stream_id = htons(sid);
            cause->reserved = htons(0);
            sctp_queue_op_err(stcb, op_err);
        }
        SCTP_STAT_INCR(sctps_badsid);
        SCTP_TCB_LOCK_ASSERT(stcb);
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
        if (tsn == (asoc->cumulative_tsn + 1)) {
            /* Update cum-ack */
            asoc->cumulative_tsn = tsn;
        }
        return (0);
    }
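    /*
     * Note on the block above: even though the SID is invalid and the
     * payload is discarded, the TSN is still marked as received in the
     * nr_mapping_array and the cumulative TSN may advance. SCTP
     * acknowledges TSNs, not stream-level delivery, so dropping the ACK
     * would only make the peer retransmit the chunk forever.
     */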
    /*
     * If its a fragmented message, lets see if we can find the control
     * on the reassembly queues.
     */
    if ((chk_type == SCTP_IDATA) &&
        ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
        (fsn == 0)) {
        /*
         * The first *must* be fsn 0, and other (middle/end) pieces
         * can *not* be fsn 0. XXX: This can happen in case of a
         * wrap around. Ignore is for now.
         */
        SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
        goto err_out;
    }
    control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
    SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
        chk_flags, control);
    if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
        /* See if we can find the re-assembly entity */
        if (control != NULL) {
            /* We found something, does it belong? */
            if (ordered && (mid != control->mid)) {
                SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
        err_out:
                op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
                sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
                *abort_flag = 1;
                return (0);
            }
            if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
                /*
                 * We can't have a switched order with an
                 * unordered chunk.
                 */
                SCTP_SNPRINTF(msg, sizeof(msg),
                    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                    tsn);
                goto err_out;
            }
            if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
                /*
                 * We can't have a switched unordered with an
                 * ordered chunk.
                 */
                SCTP_SNPRINTF(msg, sizeof(msg),
                    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                    tsn);
                goto err_out;
            }
        }
    } else {
        /*
         * Its a complete segment. Lets validate we don't have a
         * re-assembly going on with the same Stream/Seq (for
         * ordered) or in the same Stream for unordered.
         */
        if (control != NULL) {
            if (ordered || asoc->idata_supported) {
                SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
                    chk_flags, mid);
                SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
                goto err_out;
            } else {
                if ((tsn == control->fsn_included + 1) &&
                    (control->end_added == 0)) {
                    SCTP_SNPRINTF(msg, sizeof(msg),
                        "Illegal message sequence, missing end for MID: %8.8x",
                        control->fsn_included);
                    goto err_out;
                } else {
                    control = NULL;
                }
            }
        }
    }
    /* now do the tests */
    if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
        (((int)asoc->my_rwnd) <= 0)) {
        /*
         * When we have NO room in the rwnd we check to make sure
         * the reader is doing its job...
         */
        if (stcb->sctp_socket->so_rcv.sb_cc) {
            /* some to read, wake-up */
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
        }
        /* now is it in the mapping array of what we have accepted? */
        if (chk_type == SCTP_DATA) {
            if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
                SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
                /* Nope not in the valid range dump it */
        dump_packet:
                sctp_set_rwnd(stcb, asoc);
                if ((asoc->cnt_on_all_streams +
                    asoc->cnt_on_reasm_queue +
                    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
                    SCTP_STAT_INCR(sctps_datadropchklmt);
                } else {
                    SCTP_STAT_INCR(sctps_datadroprwnd);
                }
                *break_flag = 1;
                return (0);
            }
        } else {
            if (control == NULL) {
                goto dump_packet;
            }
            if (SCTP_TSN_GT(fsn, control->top_fsn)) {
                goto dump_packet;
            }
        }
    }
#ifdef SCTP_ASOCLOG_OF_TSNS
    SCTP_TCB_LOCK_ASSERT(stcb);
    if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
        asoc->tsn_in_at = 0;
        asoc->tsn_in_wrapped = 1;
    }
    asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
    asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
    asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
    asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
    asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
    asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
    asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
    asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
    asoc->tsn_in_at++;
#endif
1997 * Before we continue lets validate that we are not being fooled by
1998 * an evil attacker. We can only have Nk chunks based on our TSN
1999 * spread allowed by the mapping array N * 8 bits, so there is no
2000 * way our stream sequence numbers could have wrapped. We of course
2001 * only validate the FIRST fragment so the bit must be set.
2003 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2004 (TAILQ_EMPTY(&asoc->resetHead)) &&
2005 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2006 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2007 /* The incoming sseq is behind where we last delivered? */
2008 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2009 mid, asoc->strmin[sid].last_mid_delivered);
2011 if (asoc->idata_supported) {
2012 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2013 asoc->strmin[sid].last_mid_delivered,
2018 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2019 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2024 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2025 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2026 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
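/*
 * Aside (illustrative, not kernel code): the SCTP_TSN_GT/SCTP_MID_GE
 * style checks above are serial-number comparisons over a wrapping
 * 32-bit space. A minimal sketch of the idea, assuming the usual
 * two's-complement behavior of the subtraction:
 *
 *	static int tsn_gt(uint32_t a, uint32_t b)
 *	{
 *		// true iff (a - b) mod 2^32 lands in [1, 2^31 - 1]
 *		return (((int32_t)(a - b)) > 0);
 *	}
 *
 * The kernel's macros spell out the two half-space cases explicitly
 * rather than relying on the signed cast.
 */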
2030 if (chk_type == SCTP_IDATA) {
2031 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2033 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2035 if (last_chunk == 0) {
2036 if (chk_type == SCTP_IDATA) {
2037 dmbuf = SCTP_M_COPYM(*m,
2038 (offset + sizeof(struct sctp_idata_chunk)),
2041 dmbuf = SCTP_M_COPYM(*m,
2042 (offset + sizeof(struct sctp_data_chunk)),
2045 #ifdef SCTP_MBUF_LOGGING
2046 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2047 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2051 /* We can steal the last chunk */
2055 /* lop off the top part */
2056 if (chk_type == SCTP_IDATA) {
2057 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2059 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2061 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2062 l_len = SCTP_BUF_LEN(dmbuf);
2065 * need to count up the size; hopefully we do not hit this too often
2071 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2072 l_len += SCTP_BUF_LEN(lat);
2075 if (l_len > the_len) {
2076 /* Trim the extra bytes off the end too */
2077 m_adj(dmbuf, -(l_len - the_len));
2080 if (dmbuf == NULL) {
2081 SCTP_STAT_INCR(sctps_nomem);
2085 * Now no matter what, we need a control; get one if we don't have
2086 * one (we may have gotten it above when we found the message was fragmented).
2089 if (control == NULL) {
2090 sctp_alloc_a_readq(stcb, control);
2091 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2096 if (control == NULL) {
2097 SCTP_STAT_INCR(sctps_nomem);
2100 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2103 control->data = dmbuf;
2104 control->tail_mbuf = NULL;
2105 for (mm = control->data; mm; mm = mm->m_next) {
2106 control->length += SCTP_BUF_LEN(mm);
2107 if (SCTP_BUF_NEXT(mm) == NULL) {
2108 control->tail_mbuf = mm;
2111 control->end_added = 1;
2112 control->last_frag_seen = 1;
2113 control->first_frag_seen = 1;
2114 control->fsn_included = fsn;
2115 control->top_fsn = fsn;
2117 created_control = 1;
2119 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2120 chk_flags, ordered, mid, control);
2121 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2122 TAILQ_EMPTY(&asoc->resetHead) &&
2124 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2125 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2126 /* Candidate for express delivery */
2128 * It's not fragmented, no PD-API is up, nothing is in the
2129 * delivery queue, it's un-ordered OR ordered and the next to
2130 * deliver AND nothing else is stuck on the stream queue,
2131 * and there is room for it in the socket buffer. Let's just
2132 * stuff it up the buffer....
2134 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2135 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2136 asoc->highest_tsn_inside_nr_map = tsn;
2138 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2141 sctp_add_to_readq(stcb->sctp_ep, stcb,
2142 control, &stcb->sctp_socket->so_rcv,
2143 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2145 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2146 /* for ordered, bump what we delivered */
2147 asoc->strmin[sid].last_mid_delivered++;
2149 SCTP_STAT_INCR(sctps_recvexpress);
2150 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2151 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2152 SCTP_STR_LOG_FROM_EXPRS_DEL);
2155 goto finish_express_del;
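/*
 * Aside (illustrative sketch, not the kernel macros): the mapping
 * arrays are plain bitmaps indexed by "gap", the offset of a TSN
 * from mapping_array_base_tsn. Setting and testing a bit reduces to:
 *
 *	void map_set(uint8_t *map, uint32_t gap)
 *	{
 *		map[gap >> 3] |= (uint8_t)(1U << (gap & 0x07));
 *	}
 *
 *	int map_is_set(const uint8_t *map, uint32_t gap)
 *	{
 *		return ((map[gap >> 3] >> (gap & 0x07)) & 0x01);
 *	}
 *
 * which is essentially what SCTP_SET_TSN_PRESENT() and
 * SCTP_IS_TSN_PRESENT() do, modulo naming and bounds handling.
 */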
2158 /* Now will we need a chunk too? */
2159 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2160 sctp_alloc_a_chunk(stcb, chk);
2162 /* No memory so we drop the chunk */
2163 SCTP_STAT_INCR(sctps_nomem);
2164 if (last_chunk == 0) {
2165 /* we copied it, free the copy */
2166 sctp_m_freem(dmbuf);
2170 chk->rec.data.tsn = tsn;
2171 chk->no_fr_allowed = 0;
2172 chk->rec.data.fsn = fsn;
2173 chk->rec.data.mid = mid;
2174 chk->rec.data.sid = sid;
2175 chk->rec.data.ppid = ppid;
2176 chk->rec.data.context = stcb->asoc.context;
2177 chk->rec.data.doing_fast_retransmit = 0;
2178 chk->rec.data.rcv_flags = chk_flags;
2180 chk->send_size = the_len;
2182 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2185 atomic_add_int(&net->ref_count, 1);
2188 /* Set the appropriate TSN mark */
2189 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2190 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2191 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2192 asoc->highest_tsn_inside_nr_map = tsn;
2195 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2196 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2197 asoc->highest_tsn_inside_map = tsn;
2200 /* Now is it complete (i.e. not fragmented)? */
2201 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2203 * Special check for when streams are resetting. We could be
2204 * smarter about this and check the actual stream to see
2205 * if it is not being reset... that way we would not create a
2206 * HOLB (head-of-line blocking) when amongst streams being reset and those not being reset.
2210 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2211 SCTP_TSN_GT(tsn, liste->tsn)) {
2213 * yep, it's past where we need to reset... go ahead
2216 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2218 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2220 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2221 unsigned char inserted = 0;
2223 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2224 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2229 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2234 if (inserted == 0) {
2236 * must be put at the end, since the loop
2237 * above found nothing that sorts after it.
2240 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2243 goto finish_express_del;
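/*
 * Aside (illustrative): the loop above keeps pending_reply_queue
 * sorted by sinfo_tsn using the serial comparison. The invariant is
 * the usual ordered insert: walk until the first queued entry whose
 * TSN is NOT older than the new one, insert in front of it, and
 * append if no such entry exists. Sketched over an array, with
 * tsn_gt() as sketched earlier and a hypothetical insert_at() helper:
 *
 *	size_t pos;
 *
 *	for (pos = 0; pos < n; pos++)
 *		if (!tsn_gt(new_tsn, q[pos]))
 *			break;
 *	insert_at(q, &n, pos, new_tsn);
 */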
2245 if (chk_flags & SCTP_DATA_UNORDERED) {
2246 /* queue directly into socket buffer */
2247 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2249 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2250 sctp_add_to_readq(stcb->sctp_ep, stcb,
2252 &stcb->sctp_socket->so_rcv, 1,
2253 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2256 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2258 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2266 goto finish_express_del;
2268 /* If we reach here it's a reassembly */
2269 need_reasm_check = 1;
2270 SCTPDBG(SCTP_DEBUG_XXX,
2271 "Queue data to stream for reasm control: %p MID: %u\n",
2273 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2276 * the assoc is now gone and chk was put onto the reasm
2277 * queue, which has all been freed.
2285 /* Here we tidy up things */
2286 if (tsn == (asoc->cumulative_tsn + 1)) {
2287 /* Update cum-ack */
2288 asoc->cumulative_tsn = tsn;
2294 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2296 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2298 SCTP_STAT_INCR(sctps_recvdata);
2299 /* Set it present please */
2300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2301 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2303 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2304 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2305 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2307 if (need_reasm_check) {
2308 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2309 need_reasm_check = 0;
2311 /* check the special flag for stream resets */
2312 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2313 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2315 * we have finished working through the backlogged TSNs; now it is
2316 * time to reset streams. 1: call reset function. 2: free
2317 * pending_reply space. 3: distribute any chunks in the
2318 * pending_reply_queue.
2320 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2321 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2322 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2323 SCTP_FREE(liste, SCTP_M_STRESET);
2324 /* sa_ignore FREED_MEMORY */
2325 liste = TAILQ_FIRST(&asoc->resetHead);
2326 if (TAILQ_EMPTY(&asoc->resetHead)) {
2327 /* All can be removed */
2328 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2329 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2330 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2334 if (need_reasm_check) {
2335 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2336 need_reasm_check = 0;
2340 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2341 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2345 * if control->sinfo_tsn is <= liste->tsn we
2346 * can process it which is the NOT of
2347 * control->sinfo_tsn > liste->tsn
2349 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2350 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2354 if (need_reasm_check) {
2355 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2356 need_reasm_check = 0;
2364 static const int8_t sctp_map_lookup_tab[256] = {
2365 0, 1, 0, 2, 0, 1, 0, 3,
2366 0, 1, 0, 2, 0, 1, 0, 4,
2367 0, 1, 0, 2, 0, 1, 0, 3,
2368 0, 1, 0, 2, 0, 1, 0, 5,
2369 0, 1, 0, 2, 0, 1, 0, 3,
2370 0, 1, 0, 2, 0, 1, 0, 4,
2371 0, 1, 0, 2, 0, 1, 0, 3,
2372 0, 1, 0, 2, 0, 1, 0, 6,
2373 0, 1, 0, 2, 0, 1, 0, 3,
2374 0, 1, 0, 2, 0, 1, 0, 4,
2375 0, 1, 0, 2, 0, 1, 0, 3,
2376 0, 1, 0, 2, 0, 1, 0, 5,
2377 0, 1, 0, 2, 0, 1, 0, 3,
2378 0, 1, 0, 2, 0, 1, 0, 4,
2379 0, 1, 0, 2, 0, 1, 0, 3,
2380 0, 1, 0, 2, 0, 1, 0, 7,
2381 0, 1, 0, 2, 0, 1, 0, 3,
2382 0, 1, 0, 2, 0, 1, 0, 4,
2383 0, 1, 0, 2, 0, 1, 0, 3,
2384 0, 1, 0, 2, 0, 1, 0, 5,
2385 0, 1, 0, 2, 0, 1, 0, 3,
2386 0, 1, 0, 2, 0, 1, 0, 4,
2387 0, 1, 0, 2, 0, 1, 0, 3,
2388 0, 1, 0, 2, 0, 1, 0, 6,
2389 0, 1, 0, 2, 0, 1, 0, 3,
2390 0, 1, 0, 2, 0, 1, 0, 4,
2391 0, 1, 0, 2, 0, 1, 0, 3,
2392 0, 1, 0, 2, 0, 1, 0, 5,
2393 0, 1, 0, 2, 0, 1, 0, 3,
2394 0, 1, 0, 2, 0, 1, 0, 4,
2395 0, 1, 0, 2, 0, 1, 0, 3,
2396 0, 1, 0, 2, 0, 1, 0, 8
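/*
 * Illustrative aside (not part of the kernel source): sctp_map_lookup_tab
 * maps a byte of the OR'ed mapping arrays to the number of consecutive
 * 1-bits counted from bit 0, i.e. how many in-sequence TSNs that byte
 * contributes before the first hole. A hypothetical generator that would
 * reproduce the table above:
 */
static int
example_trailing_ones(uint8_t val)
{
	int n = 0;

	/* count 1-bits from the least significant bit up to the first 0 */
	while ((val & 0x01) && (n < 8)) {
		val >>= 1;
		n++;
	}
	return (n);
}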
2401 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2404 * Now we also need to check the mapping array in a couple of ways.
2405 * 1) Did we move the cum-ack point?
2407 * When you first glance at this you might think that all entries
2408 * that make up the position of the cum-ack would be in the
2409 * nr-mapping array only... i.e. things up to the cum-ack are always
2410 * deliverable. That's true with one exception: when it's a fragmented
2411 * message we may not deliver the data until some threshold (or all
2412 * of it) is in place. So we must OR the nr_mapping_array and
2413 * mapping_array to get a true picture of the cum-ack.
2415 struct sctp_association *asoc;
2418 int slide_from, slide_end, lgap, distance;
2419 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2423 old_cumack = asoc->cumulative_tsn;
2424 old_base = asoc->mapping_array_base_tsn;
2425 old_highest = asoc->highest_tsn_inside_map;
2427 * We could probably improve this a small bit by calculating the
2428 * offset of the current cum-ack as the starting point.
2431 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2432 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2436 /* there is a 0 bit */
2437 at += sctp_map_lookup_tab[val];
2441 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2443 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2444 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2446 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2447 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2449 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2450 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2451 sctp_print_mapping_array(asoc);
2452 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2453 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2455 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2456 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2459 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2460 highest_tsn = asoc->highest_tsn_inside_nr_map;
2462 highest_tsn = asoc->highest_tsn_inside_map;
2464 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2465 /* The complete array was completed by a single FR */
2466 /* highest becomes the cum-ack */
2472 /* clear the array */
2473 clr = ((at + 7) >> 3);
2474 if (clr > asoc->mapping_array_size) {
2475 clr = asoc->mapping_array_size;
2477 memset(asoc->mapping_array, 0, clr);
2478 memset(asoc->nr_mapping_array, 0, clr);
2480 for (i = 0; i < asoc->mapping_array_size; i++) {
2481 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2482 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2483 sctp_print_mapping_array(asoc);
2487 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2488 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2489 } else if (at >= 8) {
2490 /* we can slide the mapping array down */
2491 /* slide_from holds where we hit the first NON 0xff byte */
2494 * now calculate the ceiling of the move using our highest TSN value
2497 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2498 slide_end = (lgap >> 3);
2499 if (slide_end < slide_from) {
2500 sctp_print_mapping_array(asoc);
2502 panic("impossible slide");
2504 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2505 lgap, slide_end, slide_from, at);
2509 if (slide_end > asoc->mapping_array_size) {
2511 panic("would overrun buffer");
2513 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2514 asoc->mapping_array_size, slide_end);
2515 slide_end = asoc->mapping_array_size;
2518 distance = (slide_end - slide_from) + 1;
2519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2520 sctp_log_map(old_base, old_cumack, old_highest,
2521 SCTP_MAP_PREPARE_SLIDE);
2522 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2523 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2525 if (distance + slide_from > asoc->mapping_array_size ||
2528 * Here we do NOT slide forward the array so that
2529 * hopefully when more data comes in to fill it up
2530 * we will be able to slide it forward. Really I
2531 * don't think this should happen :-0
2534 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2535 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2536 (uint32_t)asoc->mapping_array_size,
2537 SCTP_MAP_SLIDE_NONE);
2542 for (ii = 0; ii < distance; ii++) {
2543 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2544 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2547 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2548 asoc->mapping_array[ii] = 0;
2549 asoc->nr_mapping_array[ii] = 0;
2551 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2552 asoc->highest_tsn_inside_map += (slide_from << 3);
2554 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2555 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2557 asoc->mapping_array_base_tsn += (slide_from << 3);
2558 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2559 sctp_log_map(asoc->mapping_array_base_tsn,
2560 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2561 SCTP_MAP_SLIDE_RESULT);
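/*
 * Illustrative aside (hypothetical helper, not kernel code): the byte-copy
 * loops above amount to shifting both bitmaps down by "slide_from" bytes
 * and clearing the tail. Since no bit above the highest TSN is set, the
 * same effect could be sketched with memmove()/memset():
 */
static void
example_slide_map(uint8_t *map, size_t map_size, size_t slide_from)
{
	/* move the still-interesting bytes to the front of the bitmap */
	memmove(map, map + slide_from, map_size - slide_from);
	/* zero the bytes that slid off the end */
	memset(map + (map_size - slide_from), 0, slide_from);
	/* the caller would also advance base_tsn by slide_from * 8 */
}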
2568 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2570 struct sctp_association *asoc;
2571 uint32_t highest_tsn;
2574 sctp_slide_mapping_arrays(stcb);
2576 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2577 highest_tsn = asoc->highest_tsn_inside_nr_map;
2579 highest_tsn = asoc->highest_tsn_inside_map;
2581 /* Is there a gap now? */
2582 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2585 * Now we need to see if we need to queue a sack or just start the
2586 * timer (if allowed).
2588 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2590 * Ok, special case: in the SHUTDOWN-SENT state, we make
2591 * sure the SACK timer is off and instead send a SHUTDOWN and a SACK.
2594 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2595 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2596 stcb->sctp_ep, stcb, NULL,
2597 SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2599 sctp_send_shutdown(stcb,
2600 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2602 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2606 * CMT DAC algorithm: increase number of packets received since the last ack.
2609 stcb->asoc.cmt_dac_pkts_rcvd++;
2611 if ((stcb->asoc.send_sack == 1) || /* We need to send a sack */
2613 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2615 (stcb->asoc.numduptsns) || /* we have dup's */
2616 (is_a_gap) || /* is still a gap */
2617 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2618 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2621 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2622 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2623 (stcb->asoc.send_sack == 0) &&
2624 (stcb->asoc.numduptsns == 0) &&
2625 (stcb->asoc.delayed_ack) &&
2626 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2629 * CMT DAC algorithm: With CMT, delay acks
2630 * even in the face of
2632 * reordering. Therefore, acks that do
2633 * not have to be sent because of the above
2634 * reasons will be delayed. That is, acks
2635 * that would have been sent due to gap
2636 * reports will be delayed with DAC. Start
2637 * the delayed ack timer.
2639 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2640 stcb->sctp_ep, stcb, NULL);
2643 * Ok we must build a SACK since the timer
2644 * is pending, we got our first packet OR
2645 * there are gaps or duplicates.
2647 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2648 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2649 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2652 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2653 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2654 stcb->sctp_ep, stcb, NULL);
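/*
 * Illustrative aside (hypothetical, simplified): the trigger list above
 * compresses to "SACK immediately unless the delayed-ack machinery may
 * absorb it". A restatement of the conditions as a predicate:
 */
static int
example_must_sack_now(int send_sack, int was_a_gap, int is_a_gap,
    uint32_t numduptsns, int delayed_ack, uint32_t data_pkts_seen,
    uint32_t sack_freq)
{
	return (send_sack ||			/* a SACK was explicitly requested */
	    (was_a_gap && !is_a_gap) ||		/* a gap just closed */
	    (numduptsns > 0) ||			/* duplicates to report */
	    is_a_gap ||				/* a gap is still open */
	    !delayed_ack ||			/* delayed SACK disabled */
	    (data_pkts_seen >= sack_freq));	/* packet-count limit hit */
}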
2661 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2662 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2663 struct sctp_nets *net, uint32_t *high_tsn)
2665 struct sctp_chunkhdr *ch, chunk_buf;
2666 struct sctp_association *asoc;
2667 int num_chunks = 0; /* number of DATA chunks processed */
2669 int break_flag, last_chunk;
2670 int abort_flag = 0, was_a_gap;
2672 uint32_t highest_tsn;
2673 uint16_t chk_length;
2676 sctp_set_rwnd(stcb, &stcb->asoc);
2679 SCTP_TCB_LOCK_ASSERT(stcb);
2681 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2682 highest_tsn = asoc->highest_tsn_inside_nr_map;
2684 highest_tsn = asoc->highest_tsn_inside_map;
2686 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2688 * setup where we got the last DATA packet from for any SACK that
2689 * may need to go out. Don't bump the net. This is done ONLY when a
2690 * chunk is assigned.
2692 asoc->last_data_chunk_from = net;
2695 * Now before we proceed we must figure out if this is a wasted
2696 * cluster... i.e. it is a small packet sent in and yet the driver
2697 * underneath allocated a full cluster for it. If so we must copy it
2698 * to a smaller mbuf and free up the cluster mbuf. This will help
2699 * with cluster starvation.
2701 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2702 /* we only handle mbufs that are singletons.. not chains */
2703 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2705 /* ok, let's see if we can copy the data up */
2708 /* get the pointers and copy */
2709 to = mtod(m, caddr_t *);
2710 from = mtod((*mm), caddr_t *);
2711 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2712 /* copy the length and free up the old */
2713 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2715 /* success, back copy */
2718 /* We are in trouble in the mbuf world .. yikes */
2722 /* get pointer to the first chunk header */
2723 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2724 sizeof(struct sctp_chunkhdr),
2725 (uint8_t *)&chunk_buf);
2730 * process all DATA chunks...
2732 *high_tsn = asoc->cumulative_tsn;
2734 asoc->data_pkts_seen++;
2735 while (stop_proc == 0) {
2736 /* validate chunk length */
2737 chk_length = ntohs(ch->chunk_length);
2738 if (length - *offset < chk_length) {
2739 /* all done, mutilated chunk */
2743 if ((asoc->idata_supported == 1) &&
2744 (ch->chunk_type == SCTP_DATA)) {
2745 struct mbuf *op_err;
2746 char msg[SCTP_DIAG_INFO_LEN];
2748 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2749 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2750 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2751 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2754 if ((asoc->idata_supported == 0) &&
2755 (ch->chunk_type == SCTP_IDATA)) {
2756 struct mbuf *op_err;
2757 char msg[SCTP_DIAG_INFO_LEN];
2759 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2760 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2761 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2762 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2765 if ((ch->chunk_type == SCTP_DATA) ||
2766 (ch->chunk_type == SCTP_IDATA)) {
2769 if (ch->chunk_type == SCTP_DATA) {
2770 clen = sizeof(struct sctp_data_chunk);
2772 clen = sizeof(struct sctp_idata_chunk);
2774 if (chk_length < clen) {
2776 * Need to send an abort since we had an
2777 * invalid data chunk.
2779 struct mbuf *op_err;
2780 char msg[SCTP_DIAG_INFO_LEN];
2782 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2783 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2785 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2786 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2787 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2790 #ifdef SCTP_AUDITING_ENABLED
2791 sctp_audit_log(0xB1, 0);
2793 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2798 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2799 chk_length, net, high_tsn, &abort_flag, &break_flag,
2800 last_chunk, ch->chunk_type)) {
2808 * Set because we ran out of rwnd space and have no
2809 * drop report space left.
2815 /* not a data chunk in the data region */
2816 switch (ch->chunk_type) {
2817 case SCTP_INITIATION:
2818 case SCTP_INITIATION_ACK:
2819 case SCTP_SELECTIVE_ACK:
2820 case SCTP_NR_SELECTIVE_ACK:
2821 case SCTP_HEARTBEAT_REQUEST:
2822 case SCTP_HEARTBEAT_ACK:
2823 case SCTP_ABORT_ASSOCIATION:
2825 case SCTP_SHUTDOWN_ACK:
2826 case SCTP_OPERATION_ERROR:
2827 case SCTP_COOKIE_ECHO:
2828 case SCTP_COOKIE_ACK:
2831 case SCTP_SHUTDOWN_COMPLETE:
2832 case SCTP_AUTHENTICATION:
2833 case SCTP_ASCONF_ACK:
2834 case SCTP_PACKET_DROPPED:
2835 case SCTP_STREAM_RESET:
2836 case SCTP_FORWARD_CUM_TSN:
2840 * Now, what do we do with KNOWN
2841 * chunks that are NOT in the right place?
2844 * For now, I do nothing but ignore
2845 * them. We may later want to add
2846 * sysctl stuff to switch out and do
2847 * either an ABORT() or possibly process them.
2850 struct mbuf *op_err;
2851 char msg[SCTP_DIAG_INFO_LEN];
2853 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2855 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2856 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2861 * Unknown chunk type: use bit rules after checking the length
2864 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2866 * Need to send an abort since we
2867 * had an invalid chunk.
2869 struct mbuf *op_err;
2870 char msg[SCTP_DIAG_INFO_LEN];
2872 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2873 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2874 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2875 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2878 if (ch->chunk_type & 0x40) {
2879 /* Add an error report to the queue */
2880 struct mbuf *op_err;
2881 struct sctp_gen_error_cause *cause;
2883 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2884 0, M_NOWAIT, 1, MT_DATA);
2885 if (op_err != NULL) {
2886 cause = mtod(op_err, struct sctp_gen_error_cause *);
2887 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2888 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2889 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2890 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2891 if (SCTP_BUF_NEXT(op_err) != NULL) {
2892 sctp_queue_op_err(stcb, op_err);
2894 sctp_m_freem(op_err);
2898 if ((ch->chunk_type & 0x80) == 0) {
2899 /* discard the rest of this packet */
2901 } /* else skip this bad chunk and continue */
2904 } /* switch of chunk type */
2906 *offset += SCTP_SIZE32(chk_length);
2907 if ((*offset >= length) || stop_proc) {
2908 /* no more data left in the mbuf chain */
2912 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2913 sizeof(struct sctp_chunkhdr),
2914 (uint8_t *)&chunk_buf);
2923 * we need to report rwnd overrun drops.
2925 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2929 * Did we get data? If so, update the time for auto-close and
2930 * give the peer credit for being alive.
2932 SCTP_STAT_INCR(sctps_recvpktwithdata);
2933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2934 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2935 stcb->asoc.overall_error_count,
2937 SCTP_FROM_SCTP_INDATA,
2940 stcb->asoc.overall_error_count = 0;
2941 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2943 /* now service all of the reassm queue if needed */
2944 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2945 /* Assure that we ack right away */
2946 stcb->asoc.send_sack = 1;
2948 /* Start a sack timer or QUEUE a SACK for sending */
2949 sctp_sack_check(stcb, was_a_gap);
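/*
 * Illustrative aside (hypothetical, user-space flavored): the loop above is
 * the standard SCTP chunk walk. The chunk_length field is not padded, but
 * every chunk starts on a 4-byte boundary, hence the SCTP_SIZE32() rounding
 * when advancing the offset. A minimal sketch, assuming ntohs() and an
 * unpadded header layout:
 */
struct example_chunkhdr {
	uint8_t chunk_type;
	uint8_t chunk_flags;
	uint16_t chunk_length;		/* network byte order, unpadded */
};

static void
example_walk_chunks(const uint8_t *pkt, size_t len, size_t offset)
{
	struct example_chunkhdr ch;
	uint16_t chk_length;

	while (offset + sizeof(ch) <= len) {
		memcpy(&ch, pkt + offset, sizeof(ch));	/* avoid alignment traps */
		chk_length = ntohs(ch.chunk_length);
		if (chk_length < sizeof(ch) || len - offset < chk_length)
			break;		/* mutilated chunk, stop processing */
		/* ... dispatch on ch.chunk_type here ... */
		offset += (chk_length + 3) & ~(size_t)3;	/* SCTP_SIZE32 */
	}
}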
2954 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2955 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2957 uint32_t *biggest_newly_acked_tsn,
2958 uint32_t *this_sack_lowest_newack,
2961 struct sctp_tmit_chunk *tp1;
2962 unsigned int theTSN;
2963 int j, wake_him = 0, circled = 0;
2965 /* Recover the tp1 we last saw */
2968 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2970 for (j = frag_strt; j <= frag_end; j++) {
2971 theTSN = j + last_tsn;
2973 if (tp1->rec.data.doing_fast_retransmit)
2977 * CMT: CUCv2 algorithm. For each TSN being
2978 * processed from the sent queue, track the
2979 * next expected pseudo-cumack, or
2980 * rtx_pseudo_cumack, if required. Separate
2981 * cumack trackers for first transmissions,
2982 * and retransmissions.
2984 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2985 (tp1->whoTo->find_pseudo_cumack == 1) &&
2986 (tp1->snd_count == 1)) {
2987 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2988 tp1->whoTo->find_pseudo_cumack = 0;
2990 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2991 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2992 (tp1->snd_count > 1)) {
2993 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2994 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2996 if (tp1->rec.data.tsn == theTSN) {
2997 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2999 * must be held until the cum-ack passes
3002 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3004 * If it is less than RESEND, it is
3005 * now no longer in flight.
3006 * Higher values may already be set
3007 * via previous Gap Ack Blocks...
3008 * i.e. ACKED or RESEND.
3010 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3011 *biggest_newly_acked_tsn)) {
3012 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3015 * CMT: SFR algo (and HTNA) - set
3016 * saw_newack to 1 for dest being
3017 * newly acked. Update
3018 * this_sack_highest_newack if appropriate.
3021 if (tp1->rec.data.chunk_was_revoked == 0)
3022 tp1->whoTo->saw_newack = 1;
3024 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3025 tp1->whoTo->this_sack_highest_newack)) {
3026 tp1->whoTo->this_sack_highest_newack =
3030 * CMT DAC algo: also update
3031 * this_sack_lowest_newack
3033 if (*this_sack_lowest_newack == 0) {
3034 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3035 sctp_log_sack(*this_sack_lowest_newack,
3040 SCTP_LOG_TSN_ACKED);
3042 *this_sack_lowest_newack = tp1->rec.data.tsn;
3045 * CMT: CUCv2 algorithm. If the (rtx-)pseudo-cumack for the corresponding
3046 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3047 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3048 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3049 * Separate pseudo_cumack trackers for first transmissions and
3052 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3053 if (tp1->rec.data.chunk_was_revoked == 0) {
3054 tp1->whoTo->new_pseudo_cumack = 1;
3056 tp1->whoTo->find_pseudo_cumack = 1;
3058 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3059 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3061 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3062 if (tp1->rec.data.chunk_was_revoked == 0) {
3063 tp1->whoTo->new_pseudo_cumack = 1;
3065 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3067 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3068 sctp_log_sack(*biggest_newly_acked_tsn,
3073 SCTP_LOG_TSN_ACKED);
3075 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3076 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3077 tp1->whoTo->flight_size,
3079 (uint32_t)(uintptr_t)tp1->whoTo,
3082 sctp_flight_size_decrease(tp1);
3083 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3084 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3087 sctp_total_flight_decrease(stcb, tp1);
3089 tp1->whoTo->net_ack += tp1->send_size;
3090 if (tp1->snd_count < 2) {
3092 * True non-retransmitted chunk
3094 tp1->whoTo->net_ack2 += tp1->send_size;
3101 sctp_calculate_rto(stcb,
3104 &tp1->sent_rcv_time,
3105 SCTP_RTT_FROM_DATA)) {
3108 if (tp1->whoTo->rto_needed == 0) {
3109 tp1->whoTo->rto_needed = 1;
3116 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3117 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3118 stcb->asoc.this_sack_highest_gap)) {
3119 stcb->asoc.this_sack_highest_gap =
3122 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3123 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3124 #ifdef SCTP_AUDITING_ENABLED
3125 sctp_audit_log(0xB2,
3126 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3131 * All chunks NOT UNSENT fall through here and are marked
3132 * (leave PR-SCTP ones that are to be skipped alone, though)
3134 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3135 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3136 tp1->sent = SCTP_DATAGRAM_MARKED;
3138 if (tp1->rec.data.chunk_was_revoked) {
3139 /* deflate the cwnd */
3140 tp1->whoTo->cwnd -= tp1->book_size;
3141 tp1->rec.data.chunk_was_revoked = 0;
3143 /* NR Sack code here */
3145 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3146 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3147 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3150 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3153 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3154 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3155 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3156 stcb->asoc.trigger_reset = 1;
3158 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3164 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3165 sctp_m_freem(tp1->data);
3172 } /* if (tp1->tsn == theTSN) */
3173 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3176 tp1 = TAILQ_NEXT(tp1, sctp_next);
3177 if ((tp1 == NULL) && (circled == 0)) {
3179 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3181 } /* end while (tp1) */
3184 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3186 /* In case the fragments were not in order we must reset */
3187 } /* end for (j = fragStart */
3189 return (wake_him); /* Return value only used for nr-sack */
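/*
 * Illustrative aside (hypothetical): the TSNs covered by one gap-ack block
 * are recovered exactly the way theTSN is formed above; the 16-bit start
 * and end offsets are relative to the cumulative TSN ack carried in the
 * SACK:
 */
static void
example_gap_block_range(uint32_t cum_ack, uint16_t frag_strt,
    uint16_t frag_end, uint32_t *first_tsn, uint32_t *last_tsn)
{
	/* the block acknowledges [cum_ack + frag_strt, cum_ack + frag_end] */
	*first_tsn = cum_ack + frag_strt;
	*last_tsn = cum_ack + frag_end;
}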
3194 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3195 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3196 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3197 int num_seg, int num_nr_seg, int *rto_ok)
3199 struct sctp_gap_ack_block *frag, block;
3200 struct sctp_tmit_chunk *tp1;
3205 uint16_t frag_strt, frag_end, prev_frag_end;
3207 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3211 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3214 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3216 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3217 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3218 *offset += sizeof(block);
3220 return (chunk_freed);
3222 frag_strt = ntohs(frag->start);
3223 frag_end = ntohs(frag->end);
3225 if (frag_strt > frag_end) {
3226 /* This gap report is malformed, skip it. */
3229 if (frag_strt <= prev_frag_end) {
3230 /* This gap report is not in order, so restart. */
3231 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3233 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3234 *biggest_tsn_acked = last_tsn + frag_end;
3241 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3242 non_revocable, &num_frs, biggest_newly_acked_tsn,
3243 this_sack_lowest_newack, rto_ok)) {
3246 prev_frag_end = frag_end;
3248 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3250 sctp_log_fr(*biggest_tsn_acked,
3251 *biggest_newly_acked_tsn,
3252 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3254 return (chunk_freed);
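/*
 * Illustrative aside (hypothetical): the per-block sanity rules applied
 * above, restated. A block must be internally consistent, and blocks are
 * expected in strictly increasing, non-overlapping order; violating the
 * order rule only forces the sent-queue scan to restart from the head:
 */
static int
example_gap_block_ok(uint16_t frag_strt, uint16_t frag_end,
    uint16_t prev_frag_end, int *restart_scan)
{
	if (frag_strt > frag_end)
		return (0);		/* malformed: skip this block */
	if (frag_strt <= prev_frag_end)
		*restart_scan = 1;	/* out of order: rescan from head */
	return (1);
}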
3258 sctp_check_for_revoked(struct sctp_tcb *stcb,
3259 struct sctp_association *asoc, uint32_t cumack,
3260 uint32_t biggest_tsn_acked)
3262 struct sctp_tmit_chunk *tp1;
3264 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3265 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3267 * ok this guy is either ACK or MARKED. If it is
3268 * ACKED it has been previously acked but not this
3269 * time, i.e. revoked. If it is MARKED it was ACK'ed again.
3272 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3275 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3276 /* it has been revoked */
3277 tp1->sent = SCTP_DATAGRAM_SENT;
3278 tp1->rec.data.chunk_was_revoked = 1;
3280 * We must add this stuff back in to assure
3281 * timers and such get started.
3283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3284 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3285 tp1->whoTo->flight_size,
3287 (uint32_t)(uintptr_t)tp1->whoTo,
3290 sctp_flight_size_increase(tp1);
3291 sctp_total_flight_increase(stcb, tp1);
3293 * We inflate the cwnd to compensate for our
3294 * artificial inflation of the flight_size.
3296 tp1->whoTo->cwnd += tp1->book_size;
3297 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3298 sctp_log_sack(asoc->last_acked_seq,
3303 SCTP_LOG_TSN_REVOKED);
3305 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3306 /* it has been re-acked in this SACK */
3307 tp1->sent = SCTP_DATAGRAM_ACKED;
3310 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3317 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3318 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3320 struct sctp_tmit_chunk *tp1;
3321 int strike_flag = 0;
3323 int tot_retrans = 0;
3324 uint32_t sending_seq;
3325 struct sctp_nets *net;
3326 int num_dests_sacked = 0;
3329 * select the sending_seq: this is either the next thing ready to be
3330 * sent but not yet transmitted, OR the next seq we will assign.
3332 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3334 sending_seq = asoc->sending_seq;
3336 sending_seq = tp1->rec.data.tsn;
3339 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3340 if ((asoc->sctp_cmt_on_off > 0) &&
3341 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3342 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3343 if (net->saw_newack)
3347 if (stcb->asoc.prsctp_supported) {
3348 (void)SCTP_GETTIME_TIMEVAL(&now);
3350 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3352 if (tp1->no_fr_allowed) {
3353 /* this one had a timeout or something */
3356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3357 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3358 sctp_log_fr(biggest_tsn_newly_acked,
3361 SCTP_FR_LOG_CHECK_STRIKE);
3363 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3364 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3368 if (stcb->asoc.prsctp_supported) {
3369 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3370 /* Is it expired? */
3371 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3372 /* Yes so drop it */
3373 if (tp1->data != NULL) {
3374 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3375 SCTP_SO_NOT_LOCKED);
3382 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3383 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3384 /* we are beyond the tsn in the sack */
3387 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3388 /* either a RESEND, ACKED, or MARKED */
3390 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3391 /* Continue striking FWD-TSN chunks */
3392 tp1->rec.data.fwd_tsn_cnt++;
3397 * CMT : SFR algo (covers part of DAC and HTNA as well)
3399 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3401 * No new acks were received for data sent to this
3402 * dest. Therefore, according to the SFR algo for
3403 * CMT, no data sent to this dest can be marked for
3404 * FR using this SACK.
3407 } else if (tp1->whoTo &&
3408 SCTP_TSN_GT(tp1->rec.data.tsn,
3409 tp1->whoTo->this_sack_highest_newack) &&
3410 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3412 * CMT: New acks were received for data sent to
3413 * this dest. But no new acks were seen for data
3414 * sent after tp1. Therefore, according to the SFR
3415 * algo for CMT, tp1 cannot be marked for FR using
3416 * this SACK. This step covers part of the DAC algo
3417 * and the HTNA algo as well.
3422 * Here we check to see if we have already done a FR
3423 * and if so we see if the biggest TSN we saw in the sack is
3424 * smaller than the recovery point. If so we don't strike
3425 * the tsn... otherwise we CAN strike the TSN.
3428 * @@@ JRI: Check for CMT if (accum_moved &&
3429 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3432 if (accum_moved && asoc->fast_retran_loss_recovery) {
3434 * Strike the TSN if in fast-recovery and the cum-ack moved.
3437 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3438 sctp_log_fr(biggest_tsn_newly_acked,
3441 SCTP_FR_LOG_STRIKE_CHUNK);
3443 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3446 if ((asoc->sctp_cmt_on_off > 0) &&
3447 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3449 * CMT DAC algorithm: If the SACK flag is set to
3450 * 0, then the lowest_newack test will not pass
3451 * because it would have been set to the
3452 * cumack earlier. If it is not already to be
3453 * rtx'd, if this is not a mixed sack, and if tp1 is
3454 * not between two sacked TSNs, then mark it by
3455 * one more. NOTE that we are marking it by one
3456 * additional time since the SACK DAC flag
3457 * indicates that two packets have been
3458 * received after this missing TSN.
3460 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3461 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3462 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3463 sctp_log_fr(16 + num_dests_sacked,
3466 SCTP_FR_LOG_STRIKE_CHUNK);
3471 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3472 (asoc->sctp_cmt_on_off == 0)) {
3474 * For those that have done a FR we must take
3475 * special consideration if we strike. I.e., the
3476 * biggest_newly_acked must be higher than the
3477 * sending_seq at the time we did the FR.
3480 #ifdef SCTP_FR_TO_ALTERNATE
3482 * If FR's go to new networks, then we must only do
3483 * this for singly homed asoc's. However if the FR's
3484 * go to the same network (Armando's work) then it's
3485 * ok to FR multiple times.
3493 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3494 tp1->rec.data.fast_retran_tsn)) {
3496 * Strike the TSN, since this ack is
3497 * beyond where things were when we
3500 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3501 sctp_log_fr(biggest_tsn_newly_acked,
3504 SCTP_FR_LOG_STRIKE_CHUNK);
3506 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3510 if ((asoc->sctp_cmt_on_off > 0) &&
3511 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3513 * CMT DAC algorithm: If the
3514 * SACK flag is set to 0,
3515 * then the lowest_newack test
3516 * will not pass because it
3517 * would have been set to
3518 * the cumack earlier. If it is
3519 * not already to be rtx'd,
3520 * if this is not a mixed sack
3521 * and if tp1 is not between two
3522 * sacked TSNs, then mark it by
3523 * one more. NOTE that we
3524 * are marking it by one
3525 * additional time since the
3526 * SACK DAC flag indicates
3527 * that two packets have
3528 * been received after this missing TSN.
3531 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3532 (num_dests_sacked == 1) &&
3533 SCTP_TSN_GT(this_sack_lowest_newack,
3534 tp1->rec.data.tsn)) {
3535 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3536 sctp_log_fr(32 + num_dests_sacked,
3539 SCTP_FR_LOG_STRIKE_CHUNK);
3541 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3549 * JRI: TODO: remove code for HTNA algo. CMT's SFR algo covers HTNA.
3552 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3553 biggest_tsn_newly_acked)) {
3555 * We don't strike these: this is the HTNA
3556 * algorithm, i.e. we don't strike if our TSN is
3557 * larger than the Highest TSN Newly Acked.
3561 /* Strike the TSN */
3562 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3563 sctp_log_fr(biggest_tsn_newly_acked,
3566 SCTP_FR_LOG_STRIKE_CHUNK);
3568 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3571 if ((asoc->sctp_cmt_on_off > 0) &&
3572 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3574 * CMT DAC algorithm: If the SACK flag is set to
3575 * 0, then the lowest_newack test will not pass
3576 * because it would have been set to the
3577 * cumack earlier. If it is not already to be
3578 * rtx'd, if this is not a mixed sack, and if tp1 is
3579 * not between two sacked TSNs, then mark it by
3580 * one more. NOTE that we are marking it by one
3581 * additional time since the SACK DAC flag
3582 * indicates that two packets have been
3583 * received after this missing TSN.
3585 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3586 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3587 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3588 sctp_log_fr(48 + num_dests_sacked,
3591 SCTP_FR_LOG_STRIKE_CHUNK);
3597 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3598 struct sctp_nets *alt;
3600 /* fix counts and things */
3601 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3602 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3603 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3605 (uint32_t)(uintptr_t)tp1->whoTo,
3609 tp1->whoTo->net_ack++;
3610 sctp_flight_size_decrease(tp1);
3611 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3612 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3618 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3619 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3621 /* add back to the rwnd */
3622 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3624 /* remove from the total flight */
3625 sctp_total_flight_decrease(stcb, tp1);
3627 if ((stcb->asoc.prsctp_supported) &&
3628 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3630 * Has it been retransmitted tv_sec times? -
3631 * we store the retran count there.
3633 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3634 /* Yes, so drop it */
3635 if (tp1->data != NULL) {
3636 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3637 SCTP_SO_NOT_LOCKED);
3639 /* Make sure to flag we had a FR */
3640 if (tp1->whoTo != NULL) {
3641 tp1->whoTo->net_ack++;
3647 * SCTP_PRINTF("OK, we are now ready to FR this
3650 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3651 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3655 /* This is a subsequent FR */
3656 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3658 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3659 if (asoc->sctp_cmt_on_off > 0) {
3661 * CMT: Using RTX_SSTHRESH policy for CMT.
3662 * If CMT is being used, then pick dest with
3663 * largest ssthresh for any retransmission.
3665 tp1->no_fr_allowed = 1;
3667 /* sa_ignore NO_NULL_CHK */
3668 if (asoc->sctp_cmt_pf > 0) {
3670 * JRS 5/18/07 - If CMT PF is on,
3671 * use the PF version of find_alternate_net().
3674 alt = sctp_find_alternate_net(stcb, alt, 2);
3677 * JRS 5/18/07 - If only CMT is on,
3678 * use the CMT version of find_alternate_net().
3681 /* sa_ignore NO_NULL_CHK */
3682 alt = sctp_find_alternate_net(stcb, alt, 1);
3688 * CUCv2: If a different dest is picked for
3689 * the retransmission, then new
3690 * (rtx-)pseudo_cumack needs to be tracked
3691 * for orig dest. Let CUCv2 track new (rtx-)
3692 * pseudo-cumack always.
3695 tp1->whoTo->find_pseudo_cumack = 1;
3696 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3699 } else { /* CMT is OFF */
3701 #ifdef SCTP_FR_TO_ALTERNATE
3702 /* Can we find an alternate? */
3703 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3706 * default behavior is to NOT retransmit
3707 * FR's to an alternate. Armando Caro's
3708 * paper details why.
3714 tp1->rec.data.doing_fast_retransmit = 1;
3716 /* mark the sending seq for possible subsequent FR's */
3718 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3719 * (uint32_t)tpi->rec.data.tsn);
3721 if (TAILQ_EMPTY(&asoc->send_queue)) {
3723 * If the send queue is empty then it's
3724 * the next sequence number that will be
3725 * assigned so we subtract one from this to
3726 * get the one we last sent.
3728 tp1->rec.data.fast_retran_tsn = sending_seq;
3731 * If there are chunks on the send queue
3732 * (unsent data that has made it from the
3733 * stream queues but not out the door), we
3734 * take the first one (which will have the
3735 * lowest TSN) and subtract one to get the one we last sent.
3738 struct sctp_tmit_chunk *ttt;
3740 ttt = TAILQ_FIRST(&asoc->send_queue);
3741 tp1->rec.data.fast_retran_tsn =
3747 * this guy had an RTO calculation pending on it; cancel it.
3750 if ((tp1->whoTo != NULL) &&
3751 (tp1->whoTo->rto_needed == 0)) {
3752 tp1->whoTo->rto_needed = 1;
3756 if (alt != tp1->whoTo) {
3757 /* yes, there is an alternate. */
3758 sctp_free_remote_addr(tp1->whoTo);
3759 /* sa_ignore FREED_MEMORY */
3761 atomic_add_int(&alt->ref_count, 1);
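/*
 * Illustrative aside (hypothetical, simplified): a missing TSN normally
 * collects one "strike" per qualifying SACK and becomes eligible for fast
 * retransmit at the strike threshold. The CMT DAC rule described in the
 * comments above can add at most one extra strike, because the DAC flag
 * indicates two packets arrived after the hole:
 */
static int
example_strike(int *strikes, int threshold, int dac_extra_strike)
{
	(*strikes)++;
	if (dac_extra_strike && *strikes < threshold)
		(*strikes)++;
	return (*strikes >= threshold);	/* nonzero: mark for fast rtx */
}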
3767 struct sctp_tmit_chunk *
3768 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3769 struct sctp_association *asoc)
3771 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3775 if (asoc->prsctp_supported == 0) {
3778 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3779 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3780 tp1->sent != SCTP_DATAGRAM_RESEND &&
3781 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3782 /* no chance to advance, out of here */
3785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3786 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3787 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3788 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3789 asoc->advanced_peer_ack_point,
3790 tp1->rec.data.tsn, 0, 0);
3793 if (!PR_SCTP_ENABLED(tp1->flags)) {
3795 * We can't fwd-tsn past any that are reliable aka
3796 * retransmitted until the asoc fails.
3801 (void)SCTP_GETTIME_TIMEVAL(&now);
3805 * now we got a chunk which is marked for another
3806 * retransmission to a PR-stream but has run out of its chances
3807 * already, maybe, OR has been marked to skip now. Can we skip
3808 * it if it's a resend?
3810 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3811 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3813 * Now, is this one marked for resend and its time is now up?
3816 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3817 /* Yes so drop it */
3819 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3820 1, SCTP_SO_NOT_LOCKED);
3824 * No, we are done when we hit one marked for resend
3825 * whose time has not expired.
3831 * Ok now if this chunk is marked to drop it we can clean up
3832 * the chunk, advance our peer ack point, and check the next chunk.
3835 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3836 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3837 /* advance PeerAckPoint goes forward */
3838 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3839 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3841 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3842 /* No update but we do save the chk */
3847 * If it is still in RESEND we can advance no further.
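/*
 * Illustrative aside (hypothetical, simplified): the Advanced Peer Ack
 * Point may only move across an unbroken prefix of abandoned
 * (FORWARD-TSN-SKIP) or NR-acked chunks; the first reliable, still
 * outstanding chunk stops the advance:
 */
static uint32_t
example_advance_peer_ack_point(uint32_t ack_point, const uint32_t *tsns,
    const int *skippable, size_t cnt)
{
	size_t i;

	for (i = 0; i < cnt; i++) {
		if (!skippable[i])
			break;			/* reliable chunk: stop here */
		ack_point = tsns[i];		/* abandoned: advance past it */
	}
	return (ack_point);
}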
3857 sctp_fs_audit(struct sctp_association *asoc)
3859 struct sctp_tmit_chunk *chk;
3860 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3863 int entry_flight, entry_cnt;
3868 entry_flight = asoc->total_flight;
3869 entry_cnt = asoc->total_flight_count;
3871 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3874 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3875 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3876 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3881 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3883 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3885 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3892 if ((inflight > 0) || (inbetween > 0)) {
3894 panic("Flight size-express incorrect? \n");
3896 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3897 entry_flight, entry_cnt);
3899 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3900 inflight, inbetween, resend, above, acked);
3909 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3910 struct sctp_association *asoc,
3911 struct sctp_tmit_chunk *tp1)
3913 tp1->window_probe = 0;
3914 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3915 /* TSN's skipped we do NOT move back. */
3916 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3917 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3919 (uint32_t)(uintptr_t)tp1->whoTo,
3923 /* First setup this by shrinking flight */
3924 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3925 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3928 sctp_flight_size_decrease(tp1);
3929 sctp_total_flight_decrease(stcb, tp1);
3930 /* Now mark for resend */
3931 tp1->sent = SCTP_DATAGRAM_RESEND;
3932 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3935 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3936 tp1->whoTo->flight_size,
3938 (uint32_t)(uintptr_t)tp1->whoTo,
3944 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3945 uint32_t rwnd, int *abort_now, int ecne_seen)
3947 struct sctp_nets *net;
3948 struct sctp_association *asoc;
3949 struct sctp_tmit_chunk *tp1, *tp2;
3951 int win_probe_recovery = 0;
3952 int win_probe_recovered = 0;
3953 int j, done_once = 0;
3957 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3958 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3959 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3961 SCTP_TCB_LOCK_ASSERT(stcb);
3962 #ifdef SCTP_ASOCLOG_OF_TSNS
3963 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3964 stcb->asoc.cumack_log_at++;
3965 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3966 stcb->asoc.cumack_log_at = 0;
3970 old_rwnd = asoc->peers_rwnd;
3971 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3974 } else if (asoc->last_acked_seq == cumack) {
3975 /* Window update sack */
3976 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3977 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3978 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3979 /* SWS sender side engages */
3980 asoc->peers_rwnd = 0;
3982 if (asoc->peers_rwnd > old_rwnd) {
3988 /* First setup for CC stuff */
3989 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3990 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3991 /* Drag along the window_tsn for cwr's */
3992 net->cwr_window_tsn = cumack;
3994 net->prev_cwnd = net->cwnd;
3999 * CMT: Reset CUC and Fast recovery algo variables before SACK processing.
4002 net->new_pseudo_cumack = 0;
4003 net->will_exit_fast_recovery = 0;
4004 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4005 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4008 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4009 tp1 = TAILQ_LAST(&asoc->sent_queue,
4010 sctpchunk_listhead);
4011 send_s = tp1->rec.data.tsn + 1;
4013 send_s = asoc->sending_seq;
4015 if (SCTP_TSN_GE(cumack, send_s)) {
4016 struct mbuf *op_err;
4017 char msg[SCTP_DIAG_INFO_LEN];
4021 SCTP_SNPRINTF(msg, sizeof(msg),
4022 "Cum ack %8.8x greater or equal than TSN %8.8x",
4024 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4025 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4026 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4029 asoc->this_sack_highest_gap = cumack;
4030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4031 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4032 stcb->asoc.overall_error_count,
4034 SCTP_FROM_SCTP_INDATA,
4037 stcb->asoc.overall_error_count = 0;
4038 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4039 /* process the new consecutive TSN first */
4040 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4041 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4042 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4043 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4045 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4047 * If it is less than ACKED, it is
4048 * now no longer in flight. Higher
4049 * values may occur during marking
4051 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4052 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4053 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4054 tp1->whoTo->flight_size,
4056 (uint32_t)(uintptr_t)tp1->whoTo,
4059 sctp_flight_size_decrease(tp1);
4060 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4061 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4064 /* sa_ignore NO_NULL_CHK */
4065 sctp_total_flight_decrease(stcb, tp1);
4067 tp1->whoTo->net_ack += tp1->send_size;
4068 if (tp1->snd_count < 2) {
4070 * True non-retransmitted chunk.
4073 tp1->whoTo->net_ack2 +=
4076 /* update RTO too? */
4079 sctp_calculate_rto(stcb,
4082 &tp1->sent_rcv_time,
4083 SCTP_RTT_FROM_DATA)) {
4086 if (tp1->whoTo->rto_needed == 0) {
4087 tp1->whoTo->rto_needed = 1;
4093 * CMT: CUCv2 algorithm. From the
4094 * cumack'd TSNs, for each TSN being
4095 * acked for the first time, set the
4096 * following variables for the
4097 * corresponding destination.
4098 * new_pseudo_cumack will trigger a cwnd update;
4100 * find_(rtx_)pseudo_cumack will
4101 * trigger search for the next
4102 * expected (rtx-)pseudo-cumack.
4104 tp1->whoTo->new_pseudo_cumack = 1;
4105 tp1->whoTo->find_pseudo_cumack = 1;
4106 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4108 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4109 /* sa_ignore NO_NULL_CHK */
4110 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4113 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4114 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4116 if (tp1->rec.data.chunk_was_revoked) {
4117 /* deflate the cwnd */
4118 tp1->whoTo->cwnd -= tp1->book_size;
4119 tp1->rec.data.chunk_was_revoked = 0;
4121 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4122 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4123 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4126 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4130 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4131 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4132 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4133 asoc->trigger_reset = 1;
4135 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4137 /* sa_ignore NO_NULL_CHK */
4138 sctp_free_bufspace(stcb, asoc, tp1, 1);
4139 sctp_m_freem(tp1->data);
4142 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4143 sctp_log_sack(asoc->last_acked_seq,
4148 SCTP_LOG_FREE_SENT);
4150 asoc->sent_queue_cnt--;
4151 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
    /* sa_ignore NO_NULL_CHK */
    if (stcb->sctp_socket) {
        SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
            /* sa_ignore NO_NULL_CHK */
            sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
        }
        sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
    } else {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
            sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
        }
    }
    /* JRS - Use the congestion control given in the CC module */
    if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            if (net->net_ack2 > 0) {
                /*
                 * Karn's rule applies to clearing error
                 * count, this is optional.
                 */
                net->error_count = 0;
                if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
                    /* addr came good */
                    net->dest_state |= SCTP_ADDR_REACHABLE;
                    sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
                        0, (void *)net, SCTP_SO_NOT_LOCKED);
                }
                if (net == stcb->asoc.primary_destination) {
                    if (stcb->asoc.alternate) {
                        /*
                         * release the alternate, the
                         * primary is good again
                         */
                        sctp_free_remote_addr(stcb->asoc.alternate);
                        stcb->asoc.alternate = NULL;
                    }
                }
                if (net->dest_state & SCTP_ADDR_PF) {
                    net->dest_state &= ~SCTP_ADDR_PF;
                    sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
                        stcb->sctp_ep, stcb, net,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
                    sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
                    asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
                    /* Done with this net */
                    net->net_ack = 0;
                }
                /* restore any doubled timers */
                net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
                if (net->RTO < stcb->asoc.minrto) {
                    net->RTO = stcb->asoc.minrto;
                }
                if (net->RTO > stcb->asoc.maxrto) {
                    net->RTO = stcb->asoc.maxrto;
                }
            }
        }
        asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
    }
    asoc->last_acked_seq = cumack;
    if (TAILQ_EMPTY(&asoc->sent_queue)) {
        /* nothing left in-flight */
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            net->flight_size = 0;
            net->partial_bytes_acked = 0;
        }
        asoc->total_flight = 0;
        asoc->total_flight_count = 0;
    }

    asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
        (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
        /* SWS sender side engages */
        asoc->peers_rwnd = 0;
    }
    if (asoc->peers_rwnd > old_rwnd) {
        win_probe_recovery = 1;
    }
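    /*
     * Illustrative numbers for the rwnd bookkeeping above: if the peer
     * advertised rwnd = 64000 while we have total_flight = 1200 bytes in
     * two chunks and sctp_peer_chunk_oh = 256, the usable window becomes
     * 64000 - (1200 + 2 * 256) = 62288 bytes. A result below the SWS
     * threshold is clamped to zero, and a window that grew relative to
     * old_rwnd arms window-probe recovery below.
     */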
    /* Now assure a timer where data is queued at */
again:
    j = 0;
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (win_probe_recovery && (net->window_probe)) {
            win_probe_recovered = 1;
            /*
             * Find first chunk that was used with window probe
             * and clear the sent
             */
            /* sa_ignore FREED_MEMORY */
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->window_probe) {
                    /* move back to data send queue */
                    sctp_window_probe_recovery(stcb, asoc, tp1);
                    break;
                }
            }
        }
        if (net->flight_size) {
            j++;
            sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
            if (net->window_probe) {
                net->window_probe = 0;
            }
        } else {
            if (net->window_probe) {
                /*
                 * In window probes we must assure a timer
                 * is still running there
                 */
                net->window_probe = 0;
                if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                    sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
                }
            } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
            }
        }
    }
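    /*
     * j counted the destinations that still have data in flight. If no
     * destination has flight, nothing is marked for retransmission and
     * no window probe was recovered, yet the sent_queue is non-empty,
     * the flight accounting must have drifted; sctp_fs_audit() confirms
     * it and the sizes are rebuilt from the sent_queue below, one time
     * only (done_once).
     */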
    if ((j == 0) &&
        (!TAILQ_EMPTY(&asoc->sent_queue)) &&
        (asoc->sent_queue_retran_cnt == 0) &&
        (win_probe_recovered == 0) &&
        (done_once == 0)) {
        /*
         * huh, this should not happen unless all packets are
         * PR-SCTP and marked to skip of course.
         */
        if (sctp_fs_audit(asoc)) {
            TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                net->flight_size = 0;
            }
            asoc->total_flight = 0;
            asoc->total_flight_count = 0;
            asoc->sent_queue_retran_cnt = 0;
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                    sctp_flight_size_increase(tp1);
                    sctp_total_flight_increase(stcb, tp1);
                } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                    sctp_ucount_incr(asoc->sent_queue_retran_cnt);
                }
            }
        }
        done_once = 1;
        goto again;
    }
    /**********************************/
    /* Now what about shutdown issues */
    /**********************************/
    if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
        /* nothing left on sendqueue.. consider done */
        /* clean up */
        if ((asoc->stream_queue_cnt == 1) &&
            ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
            (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
            ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
            SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
        }
        if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
            (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
            (asoc->stream_queue_cnt == 1) &&
            (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
            struct mbuf *op_err;

            *abort_now = 1;
            /* XXX */
            op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
            return;
        }
        if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
            (asoc->stream_queue_cnt == 0)) {
            struct sctp_nets *netp;

            if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
                (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
                SCTP_STAT_DECR_GAUGE32(sctps_currestab);
            }
            SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
            sctp_stop_timers_for_shutdown(stcb);
            if (asoc->alternate) {
                netp = asoc->alternate;
            } else {
                netp = asoc->primary_destination;
            }
            sctp_send_shutdown(stcb, netp);
            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
                stcb->sctp_ep, stcb, netp);
            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
                stcb->sctp_ep, stcb, NULL);
        } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
            (asoc->stream_queue_cnt == 0)) {
            struct sctp_nets *netp;

            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
            SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
            sctp_stop_timers_for_shutdown(stcb);
            if (asoc->alternate) {
                netp = asoc->alternate;
            } else {
                netp = asoc->primary_destination;
            }
            sctp_send_shutdown_ack(stcb, netp);
            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
                stcb->sctp_ep, stcb, netp);
        }
    }
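    /*
     * In short: once both the send and sent queues have drained, a
     * pending SHUTDOWN takes the association to SHUTDOWN-SENT (we send
     * the SHUTDOWN chunk ourselves), while SHUTDOWN-RECEIVED takes it
     * to SHUTDOWN-ACK-SENT; a lone partially-sent user message instead
     * forces the user-initiated abort above.
     */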
    /*********************************************/
    /* Here we perform PR-SCTP procedures        */
    /* (section 4.2)                             */
    /*********************************************/
    /* C1. update advancedPeerAckPoint */
    if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
        asoc->advanced_peer_ack_point = cumack;
    }
    /* PR-Sctp issues need to be addressed too */
    if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
        struct sctp_tmit_chunk *lchk;
        uint32_t old_adv_peer_ack_point;

        old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
        lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
        /* C3. See if we need to send a Fwd-TSN */
        if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
            /*
             * ISSUE with ECN, see FWD-TSN processing.
             */
            if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
                send_forward_tsn(stcb, asoc);
            } else if (lchk) {
                /* try to FR fwd-tsn's that get lost too */
                if (lchk->rec.data.fwd_tsn_cnt >= 3) {
                    send_forward_tsn(stcb, asoc);
                }
            }
        }
        for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
            if (lchk->whoTo != NULL) {
                break;
            }
        }
        if (lchk != NULL) {
            /* Assure a timer is up */
            sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                stcb->sctp_ep, stcb, lchk->whoTo);
        }
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
        sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
            rwnd,
            stcb->asoc.peers_rwnd,
            stcb->asoc.total_flight,
            stcb->asoc.total_output_queue_size);
    }
}
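/*
 * Slow-path SACK handler, used when the SACK carries gap-ack and/or NR
 * gap-ack blocks (counted via sctps_slowpath_sack below); the express
 * handler above covers the plain cumulative-ack case. The twelve steps
 * performed here are listed in the block comment at the top of the
 * function body.
 */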
void
sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
    struct sctp_tcb *stcb,
    uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
    int *abort_now, uint8_t flags,
    uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
{
    struct sctp_association *asoc;
    struct sctp_tmit_chunk *tp1, *tp2;
    uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
    uint16_t wake_him = 0;
    uint32_t send_s = 0;
    long j;
    int accum_moved = 0;
    int will_exit_fast_recovery = 0;
    uint32_t a_rwnd, old_rwnd;
    int win_probe_recovery = 0;
    int win_probe_recovered = 0;
    struct sctp_nets *net = NULL;
    int done_once;
    int rto_ok = 1;
    uint8_t reneged_all = 0;
    uint8_t cmt_dac_flag;

    /*
     * we take any chance we can to service our queues since we cannot
     * get awoken when the socket is read from :<
     */
    /*
     * Now perform the actual SACK handling:
     * 1) Verify that it is not an old sack, if so discard.
     * 2) If there is nothing left in the send queue (cum-ack is equal
     *    to last acked) then you have a duplicate too, update any rwnd
     *    change and verify no timers are running. Then return.
     * 3) Process any new consecutive data i.e. cum-ack moved, process
     *    these first and note that it moved.
     * 4) Process any sack blocks.
     * 5) Drop any acked from the queue.
     * 6) Check for any revoked blocks and mark.
     * 7) Update the cwnd.
     * 8) Nothing left, sync up flightsizes and things, stop all timers
     *    and also check for shutdown_pending state. If so then go ahead
     *    and send off the shutdown. If in shutdown recv, send off the
     *    shutdown-ack and start that timer. Return.
     * 9) Strike any non-acked things and do FR procedure if needed,
     *    being sure to set the FR flag.
     * 10) Do pr-sctp procedures.
     * 11) Apply any FR penalties.
     * 12) Assure we will SACK if in shutdown_recv state.
     */
    SCTP_TCB_LOCK_ASSERT(stcb);
    /* CMT DAC algo */
    this_sack_lowest_newack = 0;
    SCTP_STAT_INCR(sctps_slowpath_sack);
    last_tsn = cum_ack;
    cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
#ifdef SCTP_ASOCLOG_OF_TSNS
    stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
    stcb->asoc.cumack_log_at++;
    if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
        stcb->asoc.cumack_log_at = 0;
    }
#endif
    a_rwnd = rwnd;

    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
        sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
            rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
    }

    old_rwnd = stcb->asoc.peers_rwnd;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
        sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
            stcb->asoc.overall_error_count,
            0,
            SCTP_FROM_SCTP_INDATA,
            __LINE__);
    }
    stcb->asoc.overall_error_count = 0;
    asoc = &stcb->asoc;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
        sctp_log_sack(asoc->last_acked_seq,
            cum_ack,
            0,
            num_seg,
            num_dup,
            SCTP_LOG_NEW_SACK);
    }
    if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
        uint16_t i;
        uint32_t *dupdata, dblock;

        for (i = 0; i < num_dup; i++) {
            dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
                sizeof(uint32_t), (uint8_t *)&dblock);
            if (dupdata == NULL) {
                break;
            }
            sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
        }
    }
    /* reality check */
    if (!TAILQ_EMPTY(&asoc->sent_queue)) {
        tp1 = TAILQ_LAST(&asoc->sent_queue,
            sctpchunk_listhead);
        send_s = tp1->rec.data.tsn + 1;
    } else {
        tp1 = NULL;
        send_s = asoc->sending_seq;
    }
    if (SCTP_TSN_GE(cum_ack, send_s)) {
        struct mbuf *op_err;
        char msg[SCTP_DIAG_INFO_LEN];

        /*
         * no way, we have not even sent this TSN out yet. Peer is
         * hopelessly messed up with us.
         */
        SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
            cum_ack, send_s);
        if (tp1) {
            SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
                tp1->rec.data.tsn, (void *)tp1);
        }
hopeless_peer:
        *abort_now = 1;
        /* XXX */
        SCTP_SNPRINTF(msg, sizeof(msg),
            "Cum ack %8.8x greater or equal than TSN %8.8x",
            cum_ack, send_s);
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        return;
    }
    /**********************/
    /* 1) check the range */
    /**********************/
    if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
        /* acking something behind */
        return;
    }

    /* update the Rwnd of the peer */
    if (TAILQ_EMPTY(&asoc->sent_queue) &&
        TAILQ_EMPTY(&asoc->send_queue) &&
        (asoc->stream_queue_cnt == 0)) {
        /* nothing left on send/sent and strmq */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
            sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
                asoc->peers_rwnd, 0, 0, a_rwnd);
        }
        asoc->peers_rwnd = a_rwnd;
        if (asoc->sent_queue_retran_cnt) {
            asoc->sent_queue_retran_cnt = 0;
        }
        if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
            /* SWS sender side engages */
            asoc->peers_rwnd = 0;
        }
        /* stop any timers */
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
            net->partial_bytes_acked = 0;
            net->flight_size = 0;
        }
        asoc->total_flight = 0;
        asoc->total_flight_count = 0;
        return;
    }

    /*
     * We init netAckSz and netAckSz2 to 0. These are used to track 2
     * things. The total byte count acked is tracked in netAckSz AND
     * netAck2 is used to track the total bytes acked that are
     * unambiguous and were never retransmitted. We track these on a
     * per destination address basis.
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
            /* Drag along the window_tsn for cwr's */
            net->cwr_window_tsn = cum_ack;
        }
        net->prev_cwnd = net->cwnd;
        net->net_ack = 0;
        net->net_ack2 = 0;

        /*
         * CMT: Reset CUC and Fast recovery algo variables before
         * SACK processing
         */
        net->new_pseudo_cumack = 0;
        net->will_exit_fast_recovery = 0;
        if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
            (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
        }

        /*
         * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
         * to be greater than the cumack. Also reset saw_newack to 0
         * for all destinations.
         */
        net->saw_newack = 0;
        net->this_sack_highest_newack = last_tsn;
    }
    /* process the new consecutive TSN first */
    TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
        if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
            if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
                accum_moved = 1;
                if (tp1->sent < SCTP_DATAGRAM_ACKED) {
                    /*
                     * If it is less than ACKED, it is
                     * now no-longer in flight. Higher
                     * values may occur during marking
                     */
                    if ((tp1->whoTo->dest_state &
                        SCTP_ADDR_UNCONFIRMED) &&
                        (tp1->snd_count < 2)) {
                        /*
                         * If there was no retran and the
                         * address is un-confirmed and we
                         * sent there and are now sacked..
                         * it's confirmed, mark it so.
                         */
                        tp1->whoTo->dest_state &=
                            ~SCTP_ADDR_UNCONFIRMED;
                    }
                    if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                            sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
                                tp1->whoTo->flight_size,
                                tp1->book_size,
                                (uint32_t)(uintptr_t)tp1->whoTo,
                                tp1->rec.data.tsn);
                        }
                        sctp_flight_size_decrease(tp1);
                        sctp_total_flight_decrease(stcb, tp1);
                        if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
                            (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
                                tp1);
                        }
                    }
                    tp1->whoTo->net_ack += tp1->send_size;

                    /* CMT SFR and DAC algos */
                    this_sack_lowest_newack = tp1->rec.data.tsn;
                    tp1->whoTo->saw_newack = 1;

                    if (tp1->snd_count < 2) {
                        /*
                         * True non-retransmitted chunk
                         */
                        tp1->whoTo->net_ack2 +=
                            tp1->send_size;

                        /* update RTO too? */
                        if (tp1->do_rtt) {
                            if (rto_ok &&
                                sctp_calculate_rto(stcb,
                                    &stcb->asoc,
                                    tp1->whoTo,
                                    &tp1->sent_rcv_time,
                                    SCTP_RTT_FROM_DATA)) {
                                rto_ok = 0;
                            }
                            if (tp1->whoTo->rto_needed == 0) {
                                tp1->whoTo->rto_needed = 1;
                            }
                            tp1->do_rtt = 0;
                        }
                    }
                    /*
                     * CMT: CUCv2 algorithm. From the cumack'd TSNs,
                     * for each TSN being acked for the first time,
                     * set the following variables for the
                     * corresponding destination. new_pseudo_cumack
                     * will trigger a cwnd update.
                     * find_(rtx_)pseudo_cumack will trigger search
                     * for the next expected (rtx-)pseudo-cumack.
                     */
                    tp1->whoTo->new_pseudo_cumack = 1;
                    tp1->whoTo->find_pseudo_cumack = 1;
                    tp1->whoTo->find_rtx_pseudo_cumack = 1;

                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
                        sctp_log_sack(asoc->last_acked_seq,
                            cum_ack,
                            tp1->rec.data.tsn,
                            0,
                            0,
                            SCTP_LOG_TSN_ACKED);
                    }
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
                    }
                }
                if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                    sctp_ucount_decr(asoc->sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
                    sctp_audit_log(0xB3,
                        (asoc->sent_queue_retran_cnt & 0x000000ff));
#endif
                }
                if (tp1->rec.data.chunk_was_revoked) {
                    /* deflate the cwnd */
                    tp1->whoTo->cwnd -= tp1->book_size;
                    tp1->rec.data.chunk_was_revoked = 0;
                }
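                /*
                 * Chunks marked SCTP_DATAGRAM_NR_ACKED were covered by
                 * an NR (non-renegable) gap-ack and keep that mark;
                 * only lower states are promoted to plain ACKED here,
                 * since NR-acked data may not be revoked by a later
                 * SACK.
                 */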
                if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
                    tp1->sent = SCTP_DATAGRAM_ACKED;
                }
            }
        } else {
            break;
        }
    }
    biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
    /* always set this up to cum-ack */
    asoc->this_sack_highest_gap = last_tsn;

    if ((num_seg > 0) || (num_nr_seg > 0)) {
        /*
         * thisSackHighestGap will increase while handling NEW
         * segments, this_sack_highest_newack will increase while
         * handling NEWLY ACKED chunks. this_sack_lowest_newack is
         * used for the CMT DAC algo. saw_newack will also change.
         */
        if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
            &biggest_tsn_newly_acked, &this_sack_lowest_newack,
            num_seg, num_nr_seg, &rto_ok)) {
            wake_him++;
        }
    }
    /*
     * validate the biggest_tsn_acked in the gap acks if strict
     * adherence is wanted.
     */
    if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
        /*
         * peer is either confused or we are under attack. We must
         * abort.
         */
        SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
            biggest_tsn_acked, send_s);
        goto hopeless_peer;
    }
    /*******************************************/
    /* cancel ALL T3-send timer if accum moved */
    /*******************************************/
    if (asoc->sctp_cmt_on_off > 0) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            if (net->new_pseudo_cumack)
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
        }
    } else {
        if (accum_moved) {
            TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
            }
        }
    }
    /********************************************/
    /* drop the acked chunks from the sentqueue */
    /********************************************/
    asoc->last_acked_seq = cum_ack;

    TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
        if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
            break;
        }
        if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
            if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
                asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
            } else {
                panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
            }
        }
        if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
            (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
            TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
            asoc->trigger_reset = 1;
        }
        TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
        if (PR_SCTP_ENABLED(tp1->flags)) {
            if (asoc->pr_sctp_cnt != 0)
                asoc->pr_sctp_cnt--;
        }
        asoc->sent_queue_cnt--;
        if (tp1->data) {
            /* sa_ignore NO_NULL_CHK */
            sctp_free_bufspace(stcb, asoc, tp1, 1);
            sctp_m_freem(tp1->data);
            tp1->data = NULL;
            if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
                asoc->sent_queue_cnt_removeable--;
            }
        }
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
            sctp_log_sack(asoc->last_acked_seq,
                cum_ack,
                tp1->rec.data.tsn,
                0,
                0,
                SCTP_LOG_FREE_SENT);
        }
        sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
        wake_him++;
    }
    if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
#ifdef INVARIANTS
        panic("Warning flight size is positive and should be 0");
#else
        SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
            asoc->total_flight);
#endif
        asoc->total_flight = 0;
    }

    /* sa_ignore NO_NULL_CHK */
    if ((wake_him) && (stcb->sctp_socket)) {
        SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
            sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
        }
        sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
    } else {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
            sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
        }
    }

    if (asoc->fast_retran_loss_recovery && accum_moved) {
        if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
            /* Setup so we will exit RFC2582 fast recovery */
            will_exit_fast_recovery = 1;
        }
    }
    /*
     * Check for revoked fragments:
     *
     * - If the previous SACK had no frags, nothing can be revoked.
     * - If the previous SACK had frags and we now have frags (num_seg
     *   > 0), call sctp_check_for_revoked() to see whether the peer
     *   revoked some of them.
     * - Otherwise the peer revoked all ACKED fragments, since we had
     *   some before and now we have NONE.
     */
    if (num_seg) {
        sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
        asoc->saw_sack_with_frags = 1;
    } else if (asoc->saw_sack_with_frags) {
        int cnt_revoked = 0;

        /* Peer revoked all dg's marked or acked */
        TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
            if (tp1->sent == SCTP_DATAGRAM_ACKED) {
                tp1->sent = SCTP_DATAGRAM_SENT;
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                    sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
                        tp1->whoTo->flight_size,
                        tp1->book_size,
                        (uint32_t)(uintptr_t)tp1->whoTo,
                        tp1->rec.data.tsn);
                }
                sctp_flight_size_increase(tp1);
                sctp_total_flight_increase(stcb, tp1);
                tp1->rec.data.chunk_was_revoked = 1;
                /*
                 * To ensure that this increase in
                 * flightsize, which is artificial, does not
                 * throttle the sender, we also increase the
                 * cwnd artificially.
                 */
                tp1->whoTo->cwnd += tp1->book_size;
                cnt_revoked++;
            }
        }
        if (cnt_revoked) {
            reneged_all = 1;
        }
        asoc->saw_sack_with_frags = 0;
    }
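    /*
     * "Revoking" means the peer previously gap-acked a TSN and a later
     * SACK no longer reports it (the receiver reneged and dropped the
     * data). Illustration: SACK 1 cum-acks 10 and gap-acks 12..13; if a
     * later SACK cum-acks 10 with no gap blocks, TSNs 12..13 were
     * revoked and were put back in flight above, with cwnd compensated
     * so the artificial flight-size jump does not stall the sender.
     */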
    if (num_nr_seg > 0) {
        asoc->saw_sack_with_nr_frags = 1;
    } else {
        asoc->saw_sack_with_nr_frags = 0;
    }
    /* JRS - Use the congestion control given in the CC module */
    if (ecne_seen == 0) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            if (net->net_ack2 > 0) {
                /*
                 * Karn's rule applies to clearing error
                 * count, this is optional.
                 */
                net->error_count = 0;
                if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
                    /* addr came good */
                    net->dest_state |= SCTP_ADDR_REACHABLE;
                    sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
                        0, (void *)net, SCTP_SO_NOT_LOCKED);
                }
                if (net == stcb->asoc.primary_destination) {
                    if (stcb->asoc.alternate) {
                        /*
                         * release the alternate, the
                         * primary is good again
                         */
                        sctp_free_remote_addr(stcb->asoc.alternate);
                        stcb->asoc.alternate = NULL;
                    }
                }
                if (net->dest_state & SCTP_ADDR_PF) {
                    net->dest_state &= ~SCTP_ADDR_PF;
                    sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
                        stcb->sctp_ep, stcb, net,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
                    sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
                    asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
                    /* Done with this net */
                    net->net_ack = 0;
                }
                /* restore any doubled timers */
                net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
                if (net->RTO < stcb->asoc.minrto) {
                    net->RTO = stcb->asoc.minrto;
                }
                if (net->RTO > stcb->asoc.maxrto) {
                    net->RTO = stcb->asoc.maxrto;
                }
            }
        }
        asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
    }
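    /*
     * The per-destination tallies gathered above -- net->net_ack (bytes
     * newly acked on that path) and net->net_ack2 (the subset that was
     * never retransmitted) -- are consumed by the CC module hook just
     * called, which adjusts each path's cwnd according to the configured
     * congestion control algorithm.
     */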
    if (TAILQ_EMPTY(&asoc->sent_queue)) {
        /* nothing left in-flight */
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            /* stop all timers */
            sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                stcb, net,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
            net->flight_size = 0;
            net->partial_bytes_acked = 0;
        }
        asoc->total_flight = 0;
        asoc->total_flight_count = 0;
    }

    /**********************************/
    /* Now what about shutdown issues */
    /**********************************/
    if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
        /* nothing left on sendqueue.. consider done */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
            sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
                asoc->peers_rwnd, 0, 0, a_rwnd);
        }
        asoc->peers_rwnd = a_rwnd;
        if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
            /* SWS sender side engages */
            asoc->peers_rwnd = 0;
        }
        /* clean up */
        if ((asoc->stream_queue_cnt == 1) &&
            ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
            (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
            ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
            SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
        }
        if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
            (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
            (asoc->stream_queue_cnt == 1) &&
            (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
            struct mbuf *op_err;

            *abort_now = 1;
            /* XXX */
            op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
            return;
        }
        if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
            (asoc->stream_queue_cnt == 0)) {
            struct sctp_nets *netp;

            if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
                (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
                SCTP_STAT_DECR_GAUGE32(sctps_currestab);
            }
            SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
            sctp_stop_timers_for_shutdown(stcb);
            if (asoc->alternate) {
                netp = asoc->alternate;
            } else {
                netp = asoc->primary_destination;
            }
            sctp_send_shutdown(stcb, netp);
            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
                stcb->sctp_ep, stcb, netp);
            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
                stcb->sctp_ep, stcb, NULL);
            return;
        } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
            (asoc->stream_queue_cnt == 0)) {
            struct sctp_nets *netp;

            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
            SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
            sctp_stop_timers_for_shutdown(stcb);
            if (asoc->alternate) {
                netp = asoc->alternate;
            } else {
                netp = asoc->primary_destination;
            }
            sctp_send_shutdown_ack(stcb, netp);
            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
                stcb->sctp_ep, stcb, netp);
            return;
        }
    }
    /*
     * Now here we are going to recycle net_ack for a different use...
     * HEADS UP.
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        net->net_ack = 0;
    }

    /*
     * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
     * to be done. Setting this_sack_lowest_newack to the cum_ack will
     * automatically ensure that.
     */
    if ((asoc->sctp_cmt_on_off > 0) &&
        SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
        (cmt_dac_flag == 0)) {
        this_sack_lowest_newack = cum_ack;
    }
    if ((num_seg > 0) || (num_nr_seg > 0)) {
        sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
            biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
    }
    /* JRS - Use the congestion control given in the CC module */
    asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);

    /* Now are we exiting loss recovery ? */
    if (will_exit_fast_recovery) {
        /* Ok, we must exit fast recovery */
        asoc->fast_retran_loss_recovery = 0;
    }
    if ((asoc->sat_t3_loss_recovery) &&
        SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
        /* end satellite t3 loss recovery */
        asoc->sat_t3_loss_recovery = 0;
    }
    /*
     * CMT Fast recovery
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (net->will_exit_fast_recovery) {
            /* Ok, we must exit fast recovery */
            net->fast_retran_loss_recovery = 0;
        }
    }

    /* Adjust and set the new rwnd value */
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
        sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
            asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
    }
    asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
        (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
        /* SWS sender side engages */
        asoc->peers_rwnd = 0;
    }
    if (asoc->peers_rwnd > old_rwnd) {
        win_probe_recovery = 1;
    }

    /*
     * Now we must setup so we have a timer up for anyone with
     * outstanding data.
     */
    done_once = 0;
again:
    j = 0;
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (win_probe_recovery && (net->window_probe)) {
            win_probe_recovered = 1;
            /*
             * Find first chunk that was used with window probe
             * and clear the event. Put it back into the send
             * queue as if it has not been sent.
             */
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->window_probe) {
                    sctp_window_probe_recovery(stcb, asoc, tp1);
                    break;
                }
            }
        }
        if (net->flight_size) {
            j++;
            if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
            if (net->window_probe) {
                net->window_probe = 0;
            }
        } else {
            if (net->window_probe) {
                /*
                 * In window probes we must assure a timer
                 * is still running there
                 */
                if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                    sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                        stcb->sctp_ep, stcb, net);
                }
            } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
            }
        }
    }
    if ((j == 0) &&
        (!TAILQ_EMPTY(&asoc->sent_queue)) &&
        (asoc->sent_queue_retran_cnt == 0) &&
        (win_probe_recovered == 0) &&
        (done_once == 0)) {
        /*
         * huh, this should not happen unless all packets are
         * PR-SCTP and marked to skip of course.
         */
        if (sctp_fs_audit(asoc)) {
            TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                net->flight_size = 0;
            }
            asoc->total_flight = 0;
            asoc->total_flight_count = 0;
            asoc->sent_queue_retran_cnt = 0;
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                    sctp_flight_size_increase(tp1);
                    sctp_total_flight_increase(stcb, tp1);
                } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                    sctp_ucount_incr(asoc->sent_queue_retran_cnt);
                }
            }
        }
        done_once = 1;
        goto again;
    }
    /*********************************************/
    /* Here we perform PR-SCTP procedures        */
    /* (section 4.2)                             */
    /*********************************************/
    /* C1. update advancedPeerAckPoint */
    if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
        asoc->advanced_peer_ack_point = cum_ack;
    }
    /* C2. try to further move advancedPeerAckPoint ahead */
    if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
        struct sctp_tmit_chunk *lchk;
        uint32_t old_adv_peer_ack_point;

        old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
        lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
        /* C3. See if we need to send a Fwd-TSN */
        if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
            /*
             * ISSUE with ECN, see FWD-TSN processing.
             */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
                sctp_misc_ints(SCTP_FWD_TSN_CHECK,
                    0xee, cum_ack, asoc->advanced_peer_ack_point,
                    old_adv_peer_ack_point);
            }
            if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
                send_forward_tsn(stcb, asoc);
            } else if (lchk) {
                /* try to FR fwd-tsn's that get lost too */
                if (lchk->rec.data.fwd_tsn_cnt >= 3) {
                    send_forward_tsn(stcb, asoc);
                }
            }
        }
        for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
            if (lchk->whoTo != NULL) {
                break;
            }
        }
        if (lchk != NULL) {
            /* Assure a timer is up */
            sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                stcb->sctp_ep, stcb, lchk->whoTo);
        }
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
        sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
            a_rwnd,
            stcb->asoc.peers_rwnd,
            stcb->asoc.total_flight,
            stcb->asoc.total_output_queue_size);
    }
}
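/*
 * sctp_update_acked() feeds the cumulative TSN carried by a SHUTDOWN
 * chunk through the express SACK path. A SHUTDOWN carries no window
 * update, so a_rwnd is synthesized as peers_rwnd + total_flight, which
 * the express handler's flight subtraction then cancels out, keeping
 * the recorded peer window essentially unchanged.
 */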
void
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
{
    /* Copy cum-ack */
    uint32_t cum_ack, a_rwnd;

    cum_ack = ntohl(cp->cumulative_tsn_ack);
    /* Arrange so a_rwnd does NOT change */
    a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;

    /* Now call the express sack handling */
    sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
}
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
    struct sctp_queued_to_read *control, *ncontrol;
    struct sctp_association *asoc;
    uint32_t mid;
    int need_reasm_check = 0;

    asoc = &stcb->asoc;
    mid = strmin->last_mid_delivered;
    /*
     * First deliver anything prior to and including the stream no that
     * came in.
     */
    TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
        if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
            /* this is deliverable now */
            if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
                if (control->on_strm_q) {
                    if (control->on_strm_q == SCTP_ON_ORDERED) {
                        TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
                    } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
                        TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
                    } else {
                        panic("strmin: %p ctl: %p unknown %d",
                            strmin, control, control->on_strm_q);
#endif
                    }
                    control->on_strm_q = 0;
                }
                /* subtract pending on streams */
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                /* deliver it to at least the delivery-q */
                if (stcb->sctp_socket) {
                    sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                    sctp_add_to_readq(stcb->sctp_ep, stcb,
                        control,
                        &stcb->sctp_socket->so_rcv,
                        1, SCTP_READ_LOCK_HELD,
                        SCTP_SO_NOT_LOCKED);
                }
            } else {
                /* It's a fragmented message */
                if (control->first_frag_seen) {
                    /*
                     * Make it so this is next to
                     * deliver, we restore later
                     */
                    strmin->last_mid_delivered = control->mid - 1;
                    need_reasm_check = 1;
                    break;
                }
            }
        } else {
            /* no more delivery now. */
            break;
        }
    }
    if (need_reasm_check) {
        int ret;

        ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
        if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
            /* Restore the next to deliver unless we are ahead */
            strmin->last_mid_delivered = mid;
        }
        if (ret == 0) {
            /* Left the front Partial one on */
            return;
        }
        need_reasm_check = 0;
    }
    /*
     * now we must deliver things in queue the normal way if any are
     * now ready.
     */
    mid = strmin->last_mid_delivered + 1;
    TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
        if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
            if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
                /* this is deliverable now */
                if (control->on_strm_q) {
                    if (control->on_strm_q == SCTP_ON_ORDERED) {
                        TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
                    } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
                        TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
                    } else {
                        panic("strmin: %p ctl: %p unknown %d",
                            strmin, control, control->on_strm_q);
#endif
                    }
                    control->on_strm_q = 0;
                }
                /* subtract pending on streams */
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                /* deliver it to at least the delivery-q */
                strmin->last_mid_delivered = control->mid;
                if (stcb->sctp_socket) {
                    sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                    sctp_add_to_readq(stcb->sctp_ep, stcb,
                        control,
                        &stcb->sctp_socket->so_rcv, 1,
                        SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
                }
                mid = strmin->last_mid_delivered + 1;
            } else {
                /* It's a fragmented message */
                if (control->first_frag_seen) {
                    /*
                     * Make it so this is next to
                     * deliver
                     */
                    strmin->last_mid_delivered = control->mid - 1;
                    need_reasm_check = 1;
                    break;
                }
            }
        } else {
            break;
        }
    }
    if (need_reasm_check) {
        (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
    }
}
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
{
    struct sctp_queued_to_read *control;
    struct sctp_stream_in *strm;
    struct sctp_tmit_chunk *chk, *nchk;
    int cnt_removed = 0;

    /*
     * For now large messages held on the stream reasm that are complete
     * will be tossed too. We could in theory do more work to spin
     * through and stop after dumping one msg aka seeing the start of a
     * new msg at the head, and call the delivery function... to see if
     * it can be delivered... But for now we just dump everything on the
     * queue.
     */
    strm = &asoc->strmin[stream];
    control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
    if (control == NULL) {
        /* Not found */
        return;
    }
    if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
        return;
    }
    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
        /* Purge hanging chunks */
        if (!asoc->idata_supported && (ordered == 0)) {
            if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
                break;
            }
        }
        cnt_removed++;
        TAILQ_REMOVE(&control->reasm, chk, sctp_next);
        if (asoc->size_on_reasm_queue >= chk->send_size) {
            asoc->size_on_reasm_queue -= chk->send_size;
        } else {
#ifdef INVARIANTS
            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
            asoc->size_on_reasm_queue = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
        if (chk->data) {
            sctp_m_freem(chk->data);
            chk->data = NULL;
        }
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    }
    if (!TAILQ_EMPTY(&control->reasm)) {
        /* This has to be old data, unordered */
        if (control->data) {
            sctp_m_freem(control->data);
            control->data = NULL;
        }
        sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
        chk = TAILQ_FIRST(&control->reasm);
        if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            TAILQ_REMOVE(&control->reasm, chk, sctp_next);
            sctp_add_chk_to_control(control, strm, stcb, asoc,
                chk, SCTP_READ_LOCK_HELD);
        }
        sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
        return;
    }
    if (control->on_strm_q == SCTP_ON_ORDERED) {
        TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
        if (asoc->size_on_all_streams >= control->length) {
            asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
            asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        control->on_strm_q = 0;
    } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
        control->on_strm_q = 0;
#ifdef INVARIANTS
    } else if (control->on_strm_q) {
        panic("strm: %p ctl: %p unknown %d",
            strm, control, control->on_strm_q);
#endif
    }
    control->on_strm_q = 0;
    if (control->on_read_q == 0) {
        sctp_free_remote_addr(control->whoFrom);
        if (control->data) {
            sctp_m_freem(control->data);
            control->data = NULL;
        }
        sctp_free_a_readq(stcb, control);
    }
}
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
    /* The pr-sctp fwd tsn */
    /*
     * here we will perform all the data receiver side steps for
     * processing FwdTSN, as required by the pr-sctp draft:
     *
     * Assume we get FwdTSN(x):
     *
     * 1) update local cumTSN to x
     * 2) try to further advance cumTSN to x + others we have
     * 3) examine and update re-ordering queue on pr-in-streams
     * 4) clean up re-assembly queue
     * 5) Send a sack to report where we are.
     */
    struct sctp_association *asoc;
    uint32_t new_cum_tsn, gap;
    unsigned int i, fwd_sz, m_size;
    uint32_t str_seq;
    struct sctp_stream_in *strm;
    struct sctp_queued_to_read *control, *sv;

    asoc = &stcb->asoc;
    if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
        SCTPDBG(SCTP_DEBUG_INDATA1,
            "Bad size too small/big fwd-tsn\n");
        return;
    }
    m_size = (stcb->asoc.mapping_array_size << 3);
    /*************************************************************/
    /* 1. Here we update local cumTSN and shift the bitmap array */
    /*************************************************************/
    new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

    if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
        /* Already got there ... */
        return;
    }
    /*
     * now we know the new TSN is more advanced, let's find the actual
     * gap
     */
    SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
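    /*
     * SCTP_CALC_TSN_TO_GAP yields the bit index of new_cum_tsn relative
     * to the base of the mapping array, handling serial-number wrap.
     * Illustration: base TSN 1000 and new_cum_tsn 1007 give gap = 7;
     * since m_size is the array size in bits, gap >= m_size means the
     * FORWARD-TSN points beyond everything the bitmap currently covers.
     */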
    asoc->cumulative_tsn = new_cum_tsn;
    if (gap >= m_size) {
        if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
            struct mbuf *op_err;
            char msg[SCTP_DIAG_INFO_LEN];

            /*
             * out of range (of single byte chunks in the rwnd I
             * give out). This must be an attacker.
             */
            *abort_flag = 1;
            SCTP_SNPRINTF(msg, sizeof(msg),
                "New cum ack %8.8x too high, highest TSN %8.8x",
                new_cum_tsn, asoc->highest_tsn_inside_map);
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
            return;
        }
        SCTP_STAT_INCR(sctps_fwdtsn_map_over);

        memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
        asoc->mapping_array_base_tsn = new_cum_tsn + 1;
        asoc->highest_tsn_inside_map = new_cum_tsn;

        memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
        asoc->highest_tsn_inside_nr_map = new_cum_tsn;

        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
        }
    } else {
        SCTP_TCB_LOCK_ASSERT(stcb);
        for (i = 0; i <= gap; i++) {
            if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
                !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
                SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
                if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
                    asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
                }
            }
        }
    }
    /*************************************************************/
    /* 2. Clear up re-assembly queue                             */
    /*************************************************************/

    /* This is now done as part of clearing up the stream/seq */
    if (asoc->idata_supported == 0) {
        uint16_t sid;

        /* Flush all the un-ordered data based on cum-tsn */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        for (sid = 0; sid < asoc->streamincnt; sid++) {
            sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
        }
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    /*******************************************************/
    /* 3. Update the PR-stream re-ordering queues and fix  */
    /* delivery issues as needed.                          */
    /*******************************************************/
    fwd_sz -= sizeof(*fwd);
    if (m && fwd_sz) {
        /* New method. */
        unsigned int num_str;
        uint32_t mid, cur_mid;
        uint16_t sid;
        uint16_t ordered, flags;
        struct sctp_strseq *stseq, strseqbuf;
        struct sctp_strseq_mid *stseq_m, strseqbuf_m;

        offset += sizeof(*fwd);

        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        if (asoc->idata_supported) {
            num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
        } else {
            num_str = fwd_sz / sizeof(struct sctp_strseq);
        }
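        /*
         * Two wire formats for the skipped-stream entries follow the
         * chunk header: the classic struct sctp_strseq holds a 16-bit
         * sid and 16-bit ssn, while the I-DATA variant struct
         * sctp_strseq_mid holds sid, flags and a 32-bit mid, with
         * PR_SCTP_UNORDERED_FLAG marking unordered messages. num_str
         * is how many such entries fit in the remaining chunk body.
         */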
        for (i = 0; i < num_str; i++) {
            if (asoc->idata_supported) {
                stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
                    sizeof(struct sctp_strseq_mid),
                    (uint8_t *)&strseqbuf_m);
                offset += sizeof(struct sctp_strseq_mid);
                if (stseq_m == NULL) {
                    break;
                }
                sid = ntohs(stseq_m->sid);
                mid = ntohl(stseq_m->mid);
                flags = ntohs(stseq_m->flags);
                if (flags & PR_SCTP_UNORDERED_FLAG) {
                    ordered = 0;
                } else {
                    ordered = 1;
                }
            } else {
                stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
                    sizeof(struct sctp_strseq),
                    (uint8_t *)&strseqbuf);
                offset += sizeof(struct sctp_strseq);
                if (stseq == NULL) {
                    break;
                }
                sid = ntohs(stseq->sid);
                mid = (uint32_t)ntohs(stseq->ssn);
                ordered = 1;
            }
            /* Convert */

            /*
             * Ok we now look for the stream/seq on the read
             * queue where it's not all delivered. If we find it
             * we transmute the read entry into a PDI_ABORTED.
             */
            if (sid >= asoc->streamincnt) {
                /* screwed up streams, stop! */
                break;
            }
            if ((asoc->str_of_pdapi == sid) &&
                (asoc->ssn_of_pdapi == mid)) {
                /*
                 * If this is the one we were partially
                 * delivering now then we no longer are.
                 * Note this will change with the reassembly
                 * re-write.
                 */
                asoc->fragmented_delivery_inprogress = 0;
            }
            strm = &asoc->strmin[sid];
            for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
                sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
            }
            TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
                if ((control->sinfo_stream == sid) &&
                    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
                    str_seq = (sid << 16) | (0x0000ffff & mid);
                    control->pdapi_aborted = 1;
                    sv = stcb->asoc.control_pdapi;
                    control->end_added = 1;
                    if (control->on_strm_q == SCTP_ON_ORDERED) {
                        TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                        if (asoc->size_on_all_streams >= control->length) {
                            asoc->size_on_all_streams -= control->length;
                        } else {
#ifdef INVARIANTS
                            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                            asoc->size_on_all_streams = 0;
#endif
                        }
                        sctp_ucount_decr(asoc->cnt_on_all_streams);
                    } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
                        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
                    } else if (control->on_strm_q) {
                        panic("strm: %p ctl: %p unknown %d",
                            strm, control, control->on_strm_q);
#endif
                    }
                    control->on_strm_q = 0;
                    stcb->asoc.control_pdapi = control;
                    sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
                        stcb,
                        SCTP_PARTIAL_DELIVERY_ABORTED,
                        (void *)&str_seq,
                        SCTP_SO_NOT_LOCKED);
                    stcb->asoc.control_pdapi = sv;
                    break;
                } else if ((control->sinfo_stream == sid) &&
                    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
                    /* We are past our victim SSN */
                    break;
                }
            }
            if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
                /* Update the sequence number */
                strm->last_mid_delivered = mid;
            }
            /* now kick the stream the new way */
            /* sa_ignore NO_NULL_CHK */
            sctp_kick_prsctp_reorder_queue(stcb, strm);
        }
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    /*
     * Now slide things forward.
     */
    sctp_slide_mapping_arrays(stcb);
}