2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side, check the SACK timer to see whether a
57 * SACK should be generated into the chunk queue — but only if there is
58 * data to send, so that the SACK can be bundled with it.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration (defined later in this file): append reassembly
 * chunk 'chk' to the partially assembled message held in 'control'.
 */
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int hold_rlock);
/*
 * Refresh the cached receive window: recompute it from current socket
 * and queue state via sctp_calc_rwnd() and store it in asoc->my_rwnd.
 */
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
78 /* Calculate what the rwnd would be */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
/*
 * Compute the receive window to advertise (a-rwnd) from the socket
 * receive-buffer space, minus bytes still held on the reassembly and
 * per-stream queues plus their per-chunk mbuf overhead.
 */
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone as put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
/* No socket attached: nothing to advertise against. */
90 if (stcb->sctp_socket == NULL) {
/* Sanity: a zero chunk count implies a zero byte count on each queue. */
94 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 ("size_on_all_streams is %u", asoc->size_on_all_streams));
/* Nothing buffered anywhere: grant the full window. */
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->cnt_on_reasm_queue == 0 &&
100 asoc->cnt_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
108 * take out what has NOT been put on socket queue and we yet hold
/* Each queued chunk also costs one mbuf header (MSIZE) of overhead. */
111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 asoc->cnt_on_reasm_queue * MSIZE));
113 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 asoc->cnt_on_all_streams * MSIZE));
120 /* what is the overhead of all these rwnd's */
121 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
123 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 * even it is 0. SWS engaged
/* Silly-window avoidance: clamp a tiny window rather than bouncing it. */
126 if (calc < stcb->asoc.my_rwnd_control_len) {
/*
 * Allocate and initialize a read-queue entry (sctp_queued_to_read) for an
 * incoming message, seeding the sinfo_* fields from the chunk's TSN/PPID/
 * SID/flags. Takes a reference on 'net'. Returns NULL if allocation fails.
 */
135 * Build out our readq entry based on the incoming packet.
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139 struct sctp_nets *net,
140 uint32_t tsn, uint32_t ppid,
141 uint32_t context, uint16_t sid,
142 uint32_t mid, uint8_t flags,
145 struct sctp_queued_to_read *read_queue_e = NULL;
147 sctp_alloc_a_readq(stcb, read_queue_e);
148 if (read_queue_e == NULL) {
151 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 read_queue_e->sinfo_stream = sid;
/* sinfo_flags carries the chunk flags in its upper byte. */
153 read_queue_e->sinfo_flags = (flags << 8);
154 read_queue_e->sinfo_ppid = ppid;
155 read_queue_e->sinfo_context = context;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->mid = mid;
/* 0xffffffff marks "no fragment seen yet" for the FSN trackers. */
160 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 TAILQ_INIT(&read_queue_e->reasm);
162 read_queue_e->whoFrom = net;
/* Hold a reference on the source net for the lifetime of this entry. */
163 atomic_add_int(&net->ref_count, 1);
164 read_queue_e->data = dm;
165 read_queue_e->stcb = stcb;
166 read_queue_e->port_from = stcb->rport;
/* Association is being torn down: readers must not dereference stcb. */
167 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168 read_queue_e->do_not_ref_stcb = 1;
171 return (read_queue_e);
/*
 * Build an mbuf containing the ancillary-data (cmsg) chain for a received
 * message, honoring the endpoint's enabled features: SCTP_RCVINFO,
 * SCTP_NXTINFO (when a next-message hint is available), and the older
 * SCTP_SNDRCV / SCTP_EXTRCV structures. Each cmsg is laid out with
 * CMSG_SPACE alignment and the mbuf length is grown accordingly.
 */
175 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
177 struct sctp_extrcvinfo *seinfo;
178 struct sctp_sndrcvinfo *outinfo;
179 struct sctp_rcvinfo *rcvinfo;
180 struct sctp_nxtinfo *nxtinfo;
/* Fast path: none of the ancillary-data features are enabled. */
187 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
188 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
189 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
190 /* user does not want any ancillary data */
/* First pass: size the buffer for every cmsg we will emit. */
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
196 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
198 seinfo = (struct sctp_extrcvinfo *)sinfo;
199 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
200 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
202 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
206 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
207 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
209 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
212 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
218 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
223 SCTP_BUF_LEN(ret) = 0;
225 /* We need a CMSG header followed by the struct */
226 cmh = mtod(ret, struct cmsghdr *);
228 * Make sure that there is no un-initialized padding between the
229 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: fill in each enabled cmsg in order. */
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
233 cmh->cmsg_level = IPPROTO_SCTP;
234 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
235 cmh->cmsg_type = SCTP_RCVINFO;
236 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
237 rcvinfo->rcv_sid = sinfo->sinfo_stream;
238 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
239 rcvinfo->rcv_flags = sinfo->sinfo_flags;
240 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
241 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
242 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
243 rcvinfo->rcv_context = sinfo->sinfo_context;
244 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
/* Advance past this cmsg (header + data + padding). */
245 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
246 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO: describe the next message waiting in the queue. */
249 cmh->cmsg_level = IPPROTO_SCTP;
250 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
251 cmh->cmsg_type = SCTP_NXTINFO;
252 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
253 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
254 nxtinfo->nxt_flags = 0;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
256 nxtinfo->nxt_flags |= SCTP_UNORDERED;
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
259 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
261 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
262 nxtinfo->nxt_flags |= SCTP_COMPLETE;
264 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
265 nxtinfo->nxt_length = seinfo->serinfo_next_length;
266 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
267 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
268 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy SCTP_SNDRCV / extended SCTP_EXTRCV ancillary data. */
270 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
271 cmh->cmsg_level = IPPROTO_SCTP;
272 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
274 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
275 cmh->cmsg_type = SCTP_EXTRCV;
276 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
277 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
279 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
280 cmh->cmsg_type = SCTP_SNDRCV;
282 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a delivered TSN from the renegable mapping array to the
 * non-renegable (nr) mapping array, so a later SACK cannot revoke it.
 * Also maintains highest_tsn_inside_nr_map and, if the moved TSN was the
 * highest in the renegable map, scans downward for the new highest.
 * No-op when draining/renege support (sctp_do_drain) is disabled.
 */
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
292 uint32_t gap, i, cumackp1;
294 int in_r = 0, in_nr = 0;
296 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
299 cumackp1 = asoc->cumulative_tsn + 1;
/* TSNs at or below the cumulative ack are already irrevocable. */
300 if (SCTP_TSN_GT(cumackp1, tsn)) {
302 * this tsn is behind the cum ack and thus we don't need to
303 * worry about it being moved from one to the other.
307 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
308 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
309 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* The TSN must be tracked in at least one map; otherwise state is corrupt. */
310 if ((in_r == 0) && (in_nr == 0)) {
312 panic("Things are really messed up now");
314 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
315 sctp_print_mapping_array(asoc);
/* Set in the nr-map, clear from the renegable map. */
319 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
321 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
322 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
323 asoc->highest_tsn_inside_nr_map = tsn;
325 if (tsn == asoc->highest_tsn_inside_map) {
326 /* We must back down to see what the new highest is */
327 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
328 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
329 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
330 asoc->highest_tsn_inside_map = i;
/* Nothing left in the renegable map: mark it empty (base - 1). */
336 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert 'control' into the proper per-stream input queue: the unordered
 * queue (uno_inqueue) for unordered data, otherwise the ordered queue,
 * kept sorted by MID. Returns non-zero (duplicate MID) to tell the caller
 * to abort; sets control->on_strm_q to record which queue it landed on.
 */
342 sctp_place_control_in_stream(struct sctp_stream_in *strm,
343 struct sctp_association *asoc,
344 struct sctp_queued_to_read *control)
346 struct sctp_queued_to_read *at;
347 struct sctp_readhead *q;
348 uint8_t flags, unordered;
/* Chunk flags live in the upper byte of sinfo_flags. */
350 flags = (control->sinfo_flags >> 8);
351 unordered = flags & SCTP_DATA_UNORDERED;
353 q = &strm->uno_inqueue;
/* Pre-I-DATA (old DATA) unordered: only a single entry may sit here. */
354 if (asoc->idata_supported == 0) {
355 if (!TAILQ_EMPTY(q)) {
357 * Only one stream can be here in old style
362 TAILQ_INSERT_TAIL(q, control, next_instrm);
363 control->on_strm_q = SCTP_ON_UNORDERED;
/* An unfragmented message is complete on arrival. */
369 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
370 control->end_added = 1;
371 control->first_frag_seen = 1;
372 control->last_frag_seen = 1;
374 if (TAILQ_EMPTY(q)) {
376 TAILQ_INSERT_HEAD(q, control, next_instrm);
378 control->on_strm_q = SCTP_ON_UNORDERED;
380 control->on_strm_q = SCTP_ON_ORDERED;
/* Walk the queue and keep it sorted by MID (serial-number order). */
384 TAILQ_FOREACH(at, q, next_instrm) {
385 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
387 * one in queue is bigger than the new one,
388 * insert before this one
390 TAILQ_INSERT_BEFORE(at, control, next_instrm);
392 control->on_strm_q = SCTP_ON_UNORDERED;
394 control->on_strm_q = SCTP_ON_ORDERED;
397 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
399 * Gak, He sent me a duplicate msg id
400 * number?? return -1 to abort.
404 if (TAILQ_NEXT(at, next_instrm) == NULL) {
406 * We are at the end, insert it
409 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
410 sctp_log_strm_del(control, at,
411 SCTP_STR_LOG_FROM_INSERT_TL);
413 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
415 control->on_strm_q = SCTP_ON_UNORDERED;
417 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association because of an unrecoverable reassembly error.
 * Builds a protocol-violation cause string (I-DATA vs. old-DATA format),
 * frees the offending chunk, records the abort location ('opspot' is
 * folded into the diagnostic), and tears the association down.
 */
428 sctp_abort_in_reasm(struct sctp_tcb *stcb,
429 struct sctp_queued_to_read *control,
430 struct sctp_tmit_chunk *chk,
431 int *abort_flag, int opspot)
433 char msg[SCTP_DIAG_INFO_LEN];
/* Diagnostic format differs: I-DATA uses 32-bit MID/FSN, old DATA 16-bit SSN. */
436 if (stcb->asoc.idata_supported) {
437 SCTP_SNPRINTF(msg, sizeof(msg),
438 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
440 control->fsn_included,
443 chk->rec.data.fsn, chk->rec.data.mid);
445 SCTP_SNPRINTF(msg, sizeof(msg),
446 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
448 control->fsn_included,
452 (uint16_t)chk->rec.data.mid);
454 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
/* Release the chunk's data and the chunk itself before aborting. */
455 sctp_m_freem(chk->data);
457 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
458 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
459 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Dispose of a control entry that could not be queued: free every chunk
 * still on its reassembly list, drop the reference on its source net,
 * free any attached data mbufs, and return the entry to the free pool.
 */
464 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
467 * The control could not be placed and must be cleaned.
469 struct sctp_tmit_chunk *chk, *nchk;
471 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
472 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
474 sctp_m_freem(chk->data);
476 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Drop the net reference taken when the entry was built. */
478 sctp_free_remote_addr(control->whoFrom);
480 sctp_m_freem(control->data);
481 control->data = NULL;
483 sctp_free_a_readq(stcb, control);
/*
 * Deliver a complete message to the socket if it is the next expected MID
 * on its stream (then drain any now-in-order successors from the stream
 * queue), or insert it sorted into the stream's ordered queue. Aborts the
 * association on a MID behind the delivery point or a duplicate MID.
 */
487 * Queue the chunk either right into the socket buffer if it is the next one
488 * to go OR put it in the correct place in the delivery queue. If we do
489 * append to the so_buf, keep doing so until we are out of order as
490 * long as the control's entered are non-fragmented.
493 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
494 struct sctp_association *asoc,
495 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
498 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
499 * all the data in one stream this could happen quite rapidly. One
500 * could use the TSN to keep track of things, but this scheme breaks
501 * down in the other type of stream usage that could occur. Send a
502 * single msg to stream 0, send 4Billion messages to stream 1, now
503 * send a message to stream 0. You have a situation where the TSN
504 * has wrapped but not in the stream. Is this worth worrying about
505 * or should we just change our queue sort at the bottom to be by
508 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
509 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
510 * assignment this could happen... and I don't see how this would be
511 * a violation. So for now I am undecided an will leave the sort by
512 * SSN alone. Maybe a hybred approach is the answer
515 struct sctp_queued_to_read *at;
519 struct sctp_stream_in *strm;
520 char msg[SCTP_DIAG_INFO_LEN];
522 strm = &asoc->strmin[control->sinfo_stream];
523 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
524 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* MID behind the last delivered one: protocol violation, abort. */
526 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
527 /* The incoming sseq is behind where we last delivered? */
528 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
529 strm->last_mid_delivered, control->mid);
531 * throw it in the stream so it gets cleaned up in
532 * association destruction
534 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
535 if (asoc->idata_supported) {
536 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
537 strm->last_mid_delivered, control->sinfo_tsn,
538 control->sinfo_stream, control->mid);
540 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
541 (uint16_t)strm->last_mid_delivered,
543 control->sinfo_stream,
544 (uint16_t)control->mid);
546 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
547 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
548 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Account the message on the stream queues before delivery checks. */
554 asoc->size_on_all_streams += control->length;
555 sctp_ucount_incr(asoc->cnt_on_all_streams);
556 nxt_todel = strm->last_mid_delivered + 1;
557 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
558 /* can be delivered right away? */
559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
560 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
562 /* EY it wont be queued if it could be delivered directly */
/* Undo the accounting just added: this entry goes straight to the reader. */
564 if (asoc->size_on_all_streams >= control->length) {
565 asoc->size_on_all_streams -= control->length;
568 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
570 asoc->size_on_all_streams = 0;
573 sctp_ucount_decr(asoc->cnt_on_all_streams);
574 strm->last_mid_delivered++;
575 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
576 sctp_add_to_readq(stcb->sctp_ep, stcb,
578 &stcb->sctp_socket->so_rcv, 1,
579 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain the stream queue: deliver successive MIDs while in order. */
580 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
582 nxt_todel = strm->last_mid_delivered + 1;
583 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
584 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
585 if (control->on_strm_q == SCTP_ON_ORDERED) {
586 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
587 if (asoc->size_on_all_streams >= control->length) {
588 asoc->size_on_all_streams -= control->length;
591 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
593 asoc->size_on_all_streams = 0;
596 sctp_ucount_decr(asoc->cnt_on_all_streams);
599 panic("Huh control: %p is on_strm_q: %d",
600 control, control->on_strm_q);
603 control->on_strm_q = 0;
604 strm->last_mid_delivered++;
606 * We ignore the return of deliver_data here
607 * since we always can hold the chunk on the
608 * d-queue. And we have a finite number that
609 * can be delivered from the strq.
611 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
612 sctp_log_strm_del(control, NULL,
613 SCTP_STR_LOG_FROM_IMMED_DEL);
615 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
616 sctp_add_to_readq(stcb->sctp_ep, stcb,
618 &stcb->sctp_socket->so_rcv, 1,
619 SCTP_READ_LOCK_NOT_HELD,
622 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
630 * Ok, we did not deliver this guy, find the correct place
631 * to put it on the queue.
/* Non-zero return from placement means a duplicate MID: abort. */
633 if (sctp_place_control_in_stream(strm, asoc, control)) {
634 SCTP_SNPRINTF(msg, sizeof(msg),
635 "Queue to str MID: %u duplicate", control->mid);
636 sctp_clean_up_control(stcb, control);
637 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
638 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
639 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk control->data, dropping zero-length mbufs, recomputing
 * control->length, and setting control->tail_mbuf to the last mbuf so
 * future appends are O(1). If the entry is already on the read queue,
 * also charge each mbuf to the socket receive buffer (caller holds any
 * needed socket-buffer locks).
 */
647 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
649 struct mbuf *m, *prev = NULL;
650 struct sctp_tcb *stcb;
652 stcb = control->stcb;
653 control->held_length = 0;
657 if (SCTP_BUF_LEN(m) == 0) {
658 /* Skip mbufs with NO length */
/* Empty mbuf at the head: free it and relink control->data. */
661 control->data = sctp_m_free(m);
664 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
665 m = SCTP_BUF_NEXT(prev);
668 control->tail_mbuf = prev;
673 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
674 if (control->on_read_q) {
676 * On read queue so we must increment the SB stuff,
677 * we assume caller has done any locks of SB.
679 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
681 m = SCTP_BUF_NEXT(m);
684 control->tail_mbuf = prev;
/*
 * Append mbuf chain 'm' to control->data using the cached tail pointer,
 * pruning zero-length mbufs, updating control->length and '*added' with
 * the bytes appended, and charging the socket receive buffer when the
 * entry is already on the read queue.
 */
689 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
691 struct mbuf *prev = NULL;
692 struct sctp_tcb *stcb;
694 stcb = control->stcb;
697 panic("Control broken");
/* No tail cached: replace any stale data and rebuild the tail pointer. */
702 if (control->tail_mbuf == NULL) {
704 sctp_m_freem(control->data);
706 sctp_setup_tail_pointer(control);
709 control->tail_mbuf->m_next = m;
711 if (SCTP_BUF_LEN(m) == 0) {
712 /* Skip mbufs with NO length */
715 control->tail_mbuf->m_next = sctp_m_free(m);
716 m = control->tail_mbuf->m_next;
718 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
719 m = SCTP_BUF_NEXT(prev);
722 control->tail_mbuf = prev;
727 if (control->on_read_q) {
729 * On read queue so we must increment the SB stuff,
730 * we assume caller has done any locks of SB.
732 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
734 *added += SCTP_BUF_LEN(m);
735 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
736 m = SCTP_BUF_NEXT(m);
739 control->tail_mbuf = prev;
/*
 * Initialize 'nc' as a fresh read-queue entry cloned from 'control':
 * copies the sinfo_* metadata, MID/top_fsn and source info, resets
 * fsn_included to "none seen" (0xffffffff), and takes a new reference
 * on the shared whoFrom net. Data and reassembly queues start empty.
 */
744 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
746 memset(nc, 0, sizeof(struct sctp_queued_to_read));
747 nc->sinfo_stream = control->sinfo_stream;
748 nc->mid = control->mid;
749 TAILQ_INIT(&nc->reasm);
750 nc->top_fsn = control->top_fsn;
/* NOTE(review): mid is assigned twice (also above) — harmless duplicate. */
751 nc->mid = control->mid;
752 nc->sinfo_flags = control->sinfo_flags;
753 nc->sinfo_ppid = control->sinfo_ppid;
754 nc->sinfo_context = control->sinfo_context;
755 nc->fsn_included = 0xffffffff;
756 nc->sinfo_tsn = control->sinfo_tsn;
757 nc->sinfo_cumtsn = control->sinfo_cumtsn;
758 nc->sinfo_assoc_id = control->sinfo_assoc_id;
759 nc->whoFrom = control->whoFrom;
/* Both entries now reference the net; take an extra ref for 'nc'. */
760 atomic_add_int(&nc->whoFrom->ref_count, 1);
761 nc->stcb = control->stcb;
762 nc->port_from = control->port_from;
763 nc->do_not_ref_stcb = control->do_not_ref_stcb;
/*
 * Reset a control entry for reuse: record 'tsn' as the included FSN and,
 * if the entry is sitting on the endpoint's read queue, unlink it.
 */
767 sctp_reset_a_control(struct sctp_queued_to_read *control,
768 struct sctp_inpcb *inp, uint32_t tsn)
770 control->fsn_included = tsn;
771 if (control->on_read_q) {
773 * We have to purge it from there, hopefully this will work
776 TAILQ_REMOVE(&inp->read_queue, control, next);
777 control->on_read_q = 0;
/*
 * Reassembly service for old-style (pre-I-DATA) unordered data, where
 * every fragment maps to MID 0. Collapses consecutive FSNs from the
 * reassembly queue into 'control'; when the message completes, any
 * leftover fragments are migrated to a freshly built control entry so a
 * following message can continue assembling. Also drives partial
 * delivery (PD-API) when the assembled length passes 'pd_point'.
 * Returning non-zero tells the caller to stop scanning the unordered
 * queue.
 */
782 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
783 struct sctp_association *asoc,
784 struct sctp_stream_in *strm,
785 struct sctp_queued_to_read *control,
787 int inp_read_lock_held)
790 * Special handling for the old un-ordered data chunk. All the
791 * chunks/TSN's go to mid 0. So we have to do the old style watching
792 * to see if we have it all. If you return one, no other control
793 * entries on the un-ordered queue will be looked at. In theory
794 * there should be no others entries in reality, unless the guy is
795 * sending both unordered NDATA and unordered DATA...
797 struct sctp_tmit_chunk *chk, *lchk, *tchk;
799 struct sctp_queued_to_read *nc;
802 if (control->first_frag_seen == 0) {
803 /* Nothing we can do, we have not seen the first piece yet */
806 /* Collapse any we can */
/* Next FSN we can absorb is one past the last fragment included. */
809 fsn = control->fsn_included + 1;
810 /* Now what can we add? */
811 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
812 if (chk->rec.data.fsn == fsn) {
814 sctp_alloc_a_readq(stcb, nc);
818 memset(nc, 0, sizeof(struct sctp_queued_to_read));
819 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
820 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
824 if (control->end_added) {
826 if (!TAILQ_EMPTY(&control->reasm)) {
828 * Ok we have to move anything left
829 * on the control queue to a new
/* Clone metadata into 'nc' and migrate the leftover fragments. */
832 sctp_build_readq_entry_from_ctl(nc, control);
833 tchk = TAILQ_FIRST(&control->reasm);
834 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
835 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
836 if (asoc->size_on_reasm_queue >= tchk->send_size) {
837 asoc->size_on_reasm_queue -= tchk->send_size;
840 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
842 asoc->size_on_reasm_queue = 0;
845 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
/* The leftover FIRST fragment seeds the new control's data. */
846 nc->first_frag_seen = 1;
847 nc->fsn_included = tchk->rec.data.fsn;
848 nc->data = tchk->data;
849 nc->sinfo_ppid = tchk->rec.data.ppid;
850 nc->sinfo_tsn = tchk->rec.data.tsn;
851 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
853 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
854 sctp_setup_tail_pointer(nc);
855 tchk = TAILQ_FIRST(&control->reasm);
857 /* Spin the rest onto the queue */
859 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
860 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
861 tchk = TAILQ_FIRST(&control->reasm);
864 * Now lets add it to the queue
865 * after removing control
867 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
868 nc->on_strm_q = SCTP_ON_UNORDERED;
869 if (control->on_strm_q) {
870 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
871 control->on_strm_q = 0;
/* Message complete: end any in-progress partial delivery on it. */
874 if (control->pdapi_started) {
875 strm->pd_api_started = 0;
876 control->pdapi_started = 0;
878 if (control->on_strm_q) {
879 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
880 control->on_strm_q = 0;
881 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
883 if (control->on_read_q == 0) {
884 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
885 &stcb->sctp_socket->so_rcv, control->end_added,
886 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
888 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/* Continue assembling on the migrated entry if it already has data. */
889 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
891 * Switch to the new guy and
/* 'nc' was not needed after all: return it to the pool. */
897 if (nc->on_strm_q == 0) {
898 sctp_free_a_readq(stcb, nc);
903 sctp_free_a_readq(stcb, nc);
/* If we made progress while PD-API is active, wake the reader. */
910 if (cnt_added && strm->pd_api_started) {
911 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/* Start partial delivery once enough contiguous data has accumulated. */
913 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
914 strm->pd_api_started = 1;
915 control->pdapi_started = 1;
916 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
917 &stcb->sctp_socket->so_rcv, control->end_added,
918 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
919 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Place an old-style (pre-I-DATA) unordered fragment into 'control'.
 * FIRST fragments may seed or (when a smaller FIRST arrives later)
 * swap with the control's current data; other fragments are inserted
 * into the reassembly list sorted by FSN. A duplicate FSN or an
 * unrecoverable overlap with an in-progress partial delivery aborts
 * the association via sctp_abort_in_reasm().
 */
927 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
928 struct sctp_association *asoc,
929 struct sctp_queued_to_read *control,
930 struct sctp_tmit_chunk *chk,
933 struct sctp_tmit_chunk *at;
937 * Here we need to place the chunk into the control structure sorted
938 * in the correct order.
940 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
941 /* Its the very first one. */
942 SCTPDBG(SCTP_DEBUG_XXX,
943 "chunk is a first fsn: %u becomes fsn_included\n",
945 at = TAILQ_FIRST(&control->reasm);
/* A queued fragment has a smaller FSN: this FIRST starts a later message. */
946 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
948 * The first chunk in the reassembly is a smaller
949 * TSN than this one, even though this has a first,
950 * it must be from a subsequent msg.
954 if (control->first_frag_seen) {
956 * In old un-ordered we can reassembly on one
957 * control multiple messages. As long as the next
958 * FIRST is greater then the old first (TSN i.e. FSN
964 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
966 * Easy way the start of a new guy beyond
/* Equal FSN, or PD-API already running on this data: unrecoverable. */
971 if ((chk->rec.data.fsn == control->fsn_included) ||
972 (control->pdapi_started)) {
974 * Ok this should not happen, if it does we
975 * started the pd-api on the higher TSN
976 * (since the equals part is a TSN failure
979 * We are completly hosed in that case since
980 * I have no way to recover. This really
981 * will only happen if we can get more TSN's
982 * higher before the pd-api-point.
984 sctp_abort_in_reasm(stcb, control, chk,
986 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
991 * Ok we have two firsts and the one we just got is
992 * smaller than the one we previously placed.. yuck!
993 * We must swap them out.
/* Swap data, FSN, TSN and PPID between control and the new chunk. */
996 tdata = control->data;
997 control->data = chk->data;
999 /* Save the lengths */
1000 chk->send_size = control->length;
1001 /* Recompute length of control and tail pointer */
1002 sctp_setup_tail_pointer(control);
1003 /* Fix the FSN included */
1004 tmp = control->fsn_included;
1005 control->fsn_included = chk->rec.data.fsn;
1006 chk->rec.data.fsn = tmp;
1007 /* Fix the TSN included */
1008 tmp = control->sinfo_tsn;
1009 control->sinfo_tsn = chk->rec.data.tsn;
1010 chk->rec.data.tsn = tmp;
1011 /* Fix the PPID included */
1012 tmp = control->sinfo_ppid;
1013 control->sinfo_ppid = chk->rec.data.ppid;
1014 chk->rec.data.ppid = tmp;
1015 /* Fix tail pointer */
/* First FIRST fragment for this control: seed it from the chunk. */
1018 control->first_frag_seen = 1;
1019 control->fsn_included = chk->rec.data.fsn;
1020 control->top_fsn = chk->rec.data.fsn;
1021 control->sinfo_tsn = chk->rec.data.tsn;
1022 control->sinfo_ppid = chk->rec.data.ppid;
1023 control->data = chk->data;
1024 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1026 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1027 sctp_setup_tail_pointer(control);
/* Not a FIRST: insert sorted by FSN into the reassembly list. */
1032 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1033 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1035 * This one in queue is bigger than the new one,
1036 * insert the new one before at.
1038 asoc->size_on_reasm_queue += chk->send_size;
1039 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1041 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1043 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1045 * They sent a duplicate fsn number. This really
1046 * should not happen since the FSN is a TSN and it
1047 * should have been dropped earlier.
1049 sctp_abort_in_reasm(stcb, control, chk,
1051 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
/* Largest FSN so far: append and raise top_fsn. */
1056 if (inserted == 0) {
1057 /* Its at the end */
1058 asoc->size_on_reasm_queue += chk->send_size;
1059 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1060 control->top_fsn = chk->rec.data.fsn;
1061 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1066 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1067 struct sctp_stream_in *strm, int inp_read_lock_held)
1070 * Given a stream, strm, see if any of the SSN's on it that are
1071 * fragmented are ready to deliver. If so go ahead and place them on
1072 * the read queue. In so placing if we have hit the end, then we
1073 * need to remove them from the stream's queue.
1075 struct sctp_queued_to_read *control, *nctl = NULL;
1076 uint32_t next_to_del;
1080 if (stcb->sctp_socket) {
1081 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1082 stcb->sctp_ep->partial_delivery_point);
1084 pd_point = stcb->sctp_ep->partial_delivery_point;
1086 control = TAILQ_FIRST(&strm->uno_inqueue);
1088 if ((control != NULL) &&
1089 (asoc->idata_supported == 0)) {
1090 /* Special handling needed for "old" data format */
1091 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1095 if (strm->pd_api_started) {
1096 /* Can't add more */
1100 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1101 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1102 nctl = TAILQ_NEXT(control, next_instrm);
1103 if (control->end_added) {
1104 /* We just put the last bit on */
1105 if (control->on_strm_q) {
1107 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1108 panic("Huh control: %p on_q: %d -- not unordered?",
1109 control, control->on_strm_q);
1112 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1113 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1114 if (asoc->size_on_all_streams >= control->length) {
1115 asoc->size_on_all_streams -= control->length;
1118 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1120 asoc->size_on_all_streams = 0;
1123 sctp_ucount_decr(asoc->cnt_on_all_streams);
1124 control->on_strm_q = 0;
1126 if (control->on_read_q == 0) {
1127 sctp_add_to_readq(stcb->sctp_ep, stcb,
1129 &stcb->sctp_socket->so_rcv, control->end_added,
1130 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1133 /* Can we do a PD-API for this un-ordered guy? */
1134 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1135 strm->pd_api_started = 1;
1136 control->pdapi_started = 1;
1137 sctp_add_to_readq(stcb->sctp_ep, stcb,
1139 &stcb->sctp_socket->so_rcv, control->end_added,
1140 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1148 control = TAILQ_FIRST(&strm->inqueue);
1149 if (strm->pd_api_started) {
1150 /* Can't add more */
1153 if (control == NULL) {
1156 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1158 * Ok the guy at the top was being partially delivered
1159 * completed, so we remove it. Note the pd_api flag was
1160 * taken off when the chunk was merged on in
1161 * sctp_queue_data_for_reasm below.
1163 nctl = TAILQ_NEXT(control, next_instrm);
1164 SCTPDBG(SCTP_DEBUG_XXX,
1165 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1166 control, control->end_added, control->mid,
1167 control->top_fsn, control->fsn_included,
1168 strm->last_mid_delivered);
1169 if (control->end_added) {
1170 if (control->on_strm_q) {
1172 if (control->on_strm_q != SCTP_ON_ORDERED) {
1173 panic("Huh control: %p on_q: %d -- not ordered?",
1174 control, control->on_strm_q);
1177 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1178 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1179 if (asoc->size_on_all_streams >= control->length) {
1180 asoc->size_on_all_streams -= control->length;
1183 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1185 asoc->size_on_all_streams = 0;
1188 sctp_ucount_decr(asoc->cnt_on_all_streams);
1189 control->on_strm_q = 0;
1191 if (strm->pd_api_started && control->pdapi_started) {
1192 control->pdapi_started = 0;
1193 strm->pd_api_started = 0;
1195 if (control->on_read_q == 0) {
1196 sctp_add_to_readq(stcb->sctp_ep, stcb,
1198 &stcb->sctp_socket->so_rcv, control->end_added,
1199 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1204 if (strm->pd_api_started) {
1206 * Can't add more must have gotten an un-ordered above being
1207 * partially delivered.
1212 next_to_del = strm->last_mid_delivered + 1;
1214 SCTPDBG(SCTP_DEBUG_XXX,
1215 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1216 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1218 nctl = TAILQ_NEXT(control, next_instrm);
1219 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1220 (control->first_frag_seen)) {
1223 /* Ok we can deliver it onto the stream. */
1224 if (control->end_added) {
1225 /* We are done with it afterwards */
1226 if (control->on_strm_q) {
1228 if (control->on_strm_q != SCTP_ON_ORDERED) {
1229 panic("Huh control: %p on_q: %d -- not ordered?",
1230 control, control->on_strm_q);
1233 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1234 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1235 if (asoc->size_on_all_streams >= control->length) {
1236 asoc->size_on_all_streams -= control->length;
1239 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1241 asoc->size_on_all_streams = 0;
1244 sctp_ucount_decr(asoc->cnt_on_all_streams);
1245 control->on_strm_q = 0;
1249 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1251 * A singleton now slipping through - mark
1252 * it non-revokable too
1254 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1255 } else if (control->end_added == 0) {
1257 * Check if we can defer adding until its
1260 if ((control->length < pd_point) || (strm->pd_api_started)) {
1262 * Don't need it or cannot add more
1263 * (one being delivered that way)
1268 done = (control->end_added) && (control->last_frag_seen);
1269 if (control->on_read_q == 0) {
1271 if (asoc->size_on_all_streams >= control->length) {
1272 asoc->size_on_all_streams -= control->length;
1275 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1277 asoc->size_on_all_streams = 0;
1280 strm->pd_api_started = 1;
1281 control->pdapi_started = 1;
1283 sctp_add_to_readq(stcb->sctp_ep, stcb,
1285 &stcb->sctp_socket->so_rcv, control->end_added,
1286 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1288 strm->last_mid_delivered = next_to_del;
/*
 * NOTE(review): this listing is a lossy extraction of sctp_indata.c.
 * Interleaved original lines (the return-type line, braces, else-arms,
 * the final "return (added);") are missing, so the numbered fragments
 * below are not compilable as-is.  Only comments have been added; every
 * original fragment is byte-identical.
 */
1301 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1302 struct sctp_stream_in *strm,
1303 struct sctp_tcb *stcb, struct sctp_association *asoc,
1304 struct sctp_tmit_chunk *chk, int hold_rlock)
1307 * Given a control and a chunk, merge the data from the chk onto the
1308 * control and free up the chunk resources.
/*
 * Take the inp read lock only when the control is already visible on the
 * socket read queue (partial delivery in progress) and the caller does
 * not already hold it (hold_rlock == 0).
 */
1313 if (control->on_read_q && (hold_rlock == 0)) {
1315 * Its being pd-api'd so we must do some locks.
1317 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* Splice the chunk's mbuf chain onto the control's data chain. */
1320 if (control->data == NULL) {
1321 control->data = chk->data;
1322 sctp_setup_tail_pointer(control);
1324 sctp_add_to_tail_pointer(control, chk->data, &added);
/* Data absorbed into the control: update reassembly-queue accounting. */
1326 control->fsn_included = chk->rec.data.fsn;
1327 asoc->size_on_reasm_queue -= chk->send_size;
1328 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1329 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* The first fragment carries the message-level TSN/PPID metadata. */
1331 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1332 control->first_frag_seen = 1;
1333 control->sinfo_tsn = chk->rec.data.tsn;
1334 control->sinfo_ppid = chk->rec.data.ppid;
/*
 * Last fragment: message is complete.  If the control sits on both a
 * stream-in queue and the read queue, end any partial-delivery state
 * and unlink it from the (un)ordered stream-in queue.
 */
1336 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1338 if ((control->on_strm_q) && (control->on_read_q)) {
1339 if (control->pdapi_started) {
1340 control->pdapi_started = 0;
1341 strm->pd_api_started = 0;
1343 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1345 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1346 control->on_strm_q = 0;
1347 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1349 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1351 * Don't need to decrement
1352 * size_on_all_streams, since control is on
1355 sctp_ucount_decr(asoc->cnt_on_all_streams);
1356 control->on_strm_q = 0;
1358 } else if (control->on_strm_q) {
/* Any other on_strm_q value is a state-machine violation (debug build). */
1359 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1360 control->on_strm_q);
1364 control->end_added = 1;
1365 control->last_frag_seen = 1;
1368 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* Chunk payload now belongs to the control; release the chunk shell. */
1370 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1375 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1376 * queue, see if anything can be delivered. If so pull it off (or as much as
1377 * we can). If we run out of space then we must dump what we can and set the
1378 * appropriate flag to say we queued what we could.
/*
 * NOTE(review): lossy extraction — many original lines (braces, gotos,
 * else-arms, several statements) are missing between the numbered
 * fragments below; they are not compilable as-is.  Comments only added.
 *
 * Queues chunk `chk` into the reassembly state of `control` for the
 * stream identified by control->sinfo_stream, aborting the association
 * (via sctp_abort_in_reasm, which presumably sets *abort_flag) on
 * protocol violations, then pulls any now-in-sequence fragments into
 * the control and hands completed data to the read queue.
 */
1381 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1382 struct sctp_queued_to_read *control,
1383 struct sctp_tmit_chunk *chk,
1384 int created_control,
1385 int *abort_flag, uint32_t tsn)
1388 struct sctp_tmit_chunk *at, *nat;
1389 struct sctp_stream_in *strm;
1390 int do_wakeup, unordered;
1393 strm = &asoc->strmin[control->sinfo_stream];
1395 * For old un-ordered data chunks.
/* sinfo_flags keeps the chunk flags in its upper byte, hence the >> 8. */
1397 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1402 /* Must be added to the stream-in queue */
1403 if (created_control) {
1404 if ((unordered == 0) || (asoc->idata_supported)) {
1405 sctp_ucount_incr(asoc->cnt_on_all_streams);
/* A non-zero return here indicates a duplicate SSN/MID placement. */
1407 if (sctp_place_control_in_stream(strm, asoc, control)) {
1408 /* Duplicate SSN? */
1409 sctp_abort_in_reasm(stcb, control, chk,
1411 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1412 sctp_clean_up_control(stcb, control);
/*
 * Old DATA only: a fresh control whose TSN is exactly cum-ack + 1 must
 * carry the B (first-fragment) bit, otherwise the stream is broken.
 */
1415 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1417 * Ok we created this control and now lets validate
1418 * that its legal i.e. there is a B bit set, if not
1419 * and we have up to the cum-ack then its invalid.
1421 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1422 sctp_abort_in_reasm(stcb, control, chk,
1424 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Legacy unordered DATA takes a special injection path. */
1429 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1430 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1434 * Ok we must queue the chunk into the reasembly portion: o if its
1435 * the first it goes to the control mbuf. o if its not first but the
1436 * next in sequence it goes to the control, and each succeeding one
1437 * in order also goes. o if its not in order we place it on the list
/* --- First fragment: seed the control with this chunk's data/metadata. */
1440 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1441 /* Its the very first one. */
1442 SCTPDBG(SCTP_DEBUG_XXX,
1443 "chunk is a first fsn: %u becomes fsn_included\n",
/* Two FIRST fragments for the same message is a sender error — abort. */
1445 if (control->first_frag_seen) {
1447 * Error on senders part, they either sent us two
1448 * data chunks with FIRST, or they sent two
1449 * un-ordered chunks that were fragmented at the
1450 * same time in the same stream.
1452 sctp_abort_in_reasm(stcb, control, chk,
1454 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1457 control->first_frag_seen = 1;
1458 control->sinfo_ppid = chk->rec.data.ppid;
1459 control->sinfo_tsn = chk->rec.data.tsn;
1460 control->fsn_included = chk->rec.data.fsn;
1461 control->data = chk->data;
1462 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1464 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1465 sctp_setup_tail_pointer(control);
1466 asoc->size_on_all_streams += control->length;
/* --- Middle/last fragment: validate its FSN against current state. */
1468 /* Place the chunk in our list */
1471 if (control->last_frag_seen == 0) {
1472 /* Still willing to raise highest FSN seen */
1473 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1474 SCTPDBG(SCTP_DEBUG_XXX,
1475 "We have a new top_fsn: %u\n",
1477 control->top_fsn = chk->rec.data.fsn;
1479 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1480 SCTPDBG(SCTP_DEBUG_XXX,
1481 "The last fsn is now in place fsn: %u\n",
1483 control->last_frag_seen = 1;
/* The E-bit fragment must carry the highest FSN seen so far. */
1484 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1485 SCTPDBG(SCTP_DEBUG_XXX,
1486 "New fsn: %u is not at top_fsn: %u -- abort\n",
1489 sctp_abort_in_reasm(stcb, control, chk,
1491 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1495 if (asoc->idata_supported || control->first_frag_seen) {
1497 * For IDATA we always check since we know
1498 * that the first fragment is 0. For old
1499 * DATA we have to receive the first before
1500 * we know the first FSN (which is the TSN).
/* FSN at or below what we already delivered: duplicate — abort. */
1502 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1504 * We have already delivered up to
1507 sctp_abort_in_reasm(stcb, control, chk,
1509 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
/* Last fragment already seen: a second E-bit chunk is a violation. */
1514 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1515 /* Second last? huh? */
1516 SCTPDBG(SCTP_DEBUG_XXX,
1517 "Duplicate last fsn: %u (top: %u) -- abort\n",
1518 chk->rec.data.fsn, control->top_fsn);
1519 sctp_abort_in_reasm(stcb, control,
1521 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1524 if (asoc->idata_supported || control->first_frag_seen) {
1526 * For IDATA we always check since we know
1527 * that the first fragment is 0. For old
1528 * DATA we have to receive the first before
1529 * we know the first FSN (which is the TSN).
1532 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1534 * We have already delivered up to
1537 SCTPDBG(SCTP_DEBUG_XXX,
1538 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1539 chk->rec.data.fsn, control->fsn_included);
1540 sctp_abort_in_reasm(stcb, control, chk,
1542 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1547 * validate not beyond top FSN if we have seen last
1550 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1551 SCTPDBG(SCTP_DEBUG_XXX,
1552 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1555 sctp_abort_in_reasm(stcb, control, chk,
1557 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
/* --- Insert the chunk into the per-control reasm list, FSN-ordered. */
1562 * If we reach here, we need to place the new chunk in the
1563 * reassembly for this control.
1565 SCTPDBG(SCTP_DEBUG_XXX,
1566 "chunk is a not first fsn: %u needs to be inserted\n",
1568 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1569 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1571 * This one in queue is bigger than the new
1572 * one, insert the new one before at.
1574 SCTPDBG(SCTP_DEBUG_XXX,
1575 "Insert it before fsn: %u\n",
1577 asoc->size_on_reasm_queue += chk->send_size;
1578 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1579 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1582 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1584 * Gak, He sent me a duplicate str seq
1588 * foo bar, I guess I will just free this
1589 * new guy, should we abort too? FIX ME
1590 * MAYBE? Or it COULD be that the SSN's have
1591 * wrapped. Maybe I should compare to TSN
1592 * somehow... sigh for now just blow away
1595 SCTPDBG(SCTP_DEBUG_XXX,
1596 "Duplicate to fsn: %u -- abort\n",
1598 sctp_abort_in_reasm(stcb, control,
1600 SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1604 if (inserted == 0) {
1605 /* Goes on the end */
1606 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1608 asoc->size_on_reasm_queue += chk->send_size;
1609 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1610 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/* --- Drain: merge contiguous fragments (fsn_included+1, +2, ...) into
 * the control.  Only meaningful once the first fragment has arrived. */
1614 * Ok lets see if we can suck any up into the control structure that
1615 * are in seq if it makes sense.
1619 * If the first fragment has not been seen there is no sense in
1622 if (control->first_frag_seen) {
1623 next_fsn = control->fsn_included + 1;
1624 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1625 if (at->rec.data.fsn == next_fsn) {
1626 /* We can add this one now to the control */
1627 SCTPDBG(SCTP_DEBUG_XXX,
1628 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1631 next_fsn, control->fsn_included);
1632 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1633 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1634 if (control->on_read_q) {
1638 * We only add to the
1639 * size-on-all-streams if its not on
1640 * the read q. The read q flag will
1641 * cause a sballoc so its accounted
1644 asoc->size_on_all_streams += lenadded;
/* Message complete while a PD-API was running: close the PD state. */
1647 if (control->end_added && control->pdapi_started) {
1648 if (strm->pd_api_started) {
1649 strm->pd_api_started = 0;
1650 control->pdapi_started = 0;
1652 if (control->on_read_q == 0) {
1653 sctp_add_to_readq(stcb->sctp_ep, stcb,
1655 &stcb->sctp_socket->so_rcv, control->end_added,
1656 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1666 /* Need to wakeup the reader */
1667 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Find the reassembly control for message `mid` on stream `strm`.
 * NOTE(review): lossy extraction — the return statements, closing
 * braces, and the gating on `ordered` are in missing lines, so the
 * exact control flow must be confirmed against upstream sctp_indata.c.
 */
1671 static struct sctp_queued_to_read *
1672 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1674 struct sctp_queued_to_read *control;
/* Scan the ordered stream-in queue for a matching MID. */
1677 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1678 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* With I-DATA, unordered messages also carry MIDs: search that queue too. */
1683 if (idata_supported) {
1684 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1685 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Legacy DATA unordered path: falls back to the unordered queue head. */
1690 control = TAILQ_FIRST(&strm->uno_inqueue);
1697 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1698 struct mbuf **m, int offset, int chk_length,
1699 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1700 int *break_flag, int last_chunk, uint8_t chk_type)
1702 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1703 struct sctp_stream_in *strm;
1704 uint32_t tsn, fsn, gap, mid;
1707 int need_reasm_check = 0;
1709 struct mbuf *op_err;
1710 char msg[SCTP_DIAG_INFO_LEN];
1711 struct sctp_queued_to_read *control, *ncontrol;
1714 struct sctp_stream_reset_list *liste;
1717 int created_control = 0;
1719 if (chk_type == SCTP_IDATA) {
1720 struct sctp_idata_chunk *chunk, chunk_buf;
1722 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1723 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1724 chk_flags = chunk->ch.chunk_flags;
1725 clen = sizeof(struct sctp_idata_chunk);
1726 tsn = ntohl(chunk->dp.tsn);
1727 sid = ntohs(chunk->dp.sid);
1728 mid = ntohl(chunk->dp.mid);
1729 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1731 ppid = chunk->dp.ppid_fsn.ppid;
1733 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1734 ppid = 0xffffffff; /* Use as an invalid value. */
1737 struct sctp_data_chunk *chunk, chunk_buf;
1739 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1740 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1741 chk_flags = chunk->ch.chunk_flags;
1742 clen = sizeof(struct sctp_data_chunk);
1743 tsn = ntohl(chunk->dp.tsn);
1744 sid = ntohs(chunk->dp.sid);
1745 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1747 ppid = chunk->dp.ppid;
1749 if ((size_t)chk_length == clen) {
1751 * Need to send an abort since we had a empty data chunk.
1753 op_err = sctp_generate_no_user_data_cause(tsn);
1754 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1755 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1759 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1760 asoc->send_sack = 1;
1762 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1763 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1764 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1769 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1770 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1771 /* It is a duplicate */
1772 SCTP_STAT_INCR(sctps_recvdupdata);
1773 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1774 /* Record a dup for the next outbound sack */
1775 asoc->dup_tsns[asoc->numduptsns] = tsn;
1778 asoc->send_sack = 1;
1781 /* Calculate the number of TSN's between the base and this TSN */
1782 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1783 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1784 /* Can't hold the bit in the mapping at max array, toss it */
1787 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1788 SCTP_TCB_LOCK_ASSERT(stcb);
1789 if (sctp_expand_mapping_array(asoc, gap)) {
1790 /* Can't expand, drop it */
1794 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1797 /* See if we have received this one already */
1798 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1799 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1800 SCTP_STAT_INCR(sctps_recvdupdata);
1801 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1802 /* Record a dup for the next outbound sack */
1803 asoc->dup_tsns[asoc->numduptsns] = tsn;
1806 asoc->send_sack = 1;
1810 * Check to see about the GONE flag, duplicates would cause a sack
1811 * to be sent up above
1813 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1814 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1815 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1817 * wait a minute, this guy is gone, there is no longer a
1818 * receiver. Send peer an ABORT!
1820 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1821 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1826 * Now before going further we see if there is room. If NOT then we
1827 * MAY let one through only IF this TSN is the one we are waiting
1828 * for on a partial delivery API.
1831 /* Is the stream valid? */
1832 if (sid >= asoc->streamincnt) {
1833 struct sctp_error_invalid_stream *cause;
1835 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1836 0, M_NOWAIT, 1, MT_DATA);
1837 if (op_err != NULL) {
1838 /* add some space up front so prepend will work well */
1839 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1840 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1842 * Error causes are just param's and this one has
1843 * two back to back phdr, one with the error type
1844 * and size, the other with the streamid and a rsvd
1846 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1847 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1848 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1849 cause->stream_id = htons(sid);
1850 cause->reserved = htons(0);
1851 sctp_queue_op_err(stcb, op_err);
1853 SCTP_STAT_INCR(sctps_badsid);
1854 SCTP_TCB_LOCK_ASSERT(stcb);
1855 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1856 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1857 asoc->highest_tsn_inside_nr_map = tsn;
1859 if (tsn == (asoc->cumulative_tsn + 1)) {
1860 /* Update cum-ack */
1861 asoc->cumulative_tsn = tsn;
1866 * If its a fragmented message, lets see if we can find the control
1867 * on the reassembly queues.
1869 if ((chk_type == SCTP_IDATA) &&
1870 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1873 * The first *must* be fsn 0, and other (middle/end) pieces
1874 * can *not* be fsn 0. XXX: This can happen in case of a
1875 * wrap around. Ignore is for now.
1877 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1880 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1881 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1882 chk_flags, control);
1883 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1884 /* See if we can find the re-assembly entity */
1885 if (control != NULL) {
1886 /* We found something, does it belong? */
1887 if (ordered && (mid != control->mid)) {
1888 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1890 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1891 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1892 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1896 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1898 * We can't have a switched order with an
1901 SCTP_SNPRINTF(msg, sizeof(msg),
1902 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1906 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1908 * We can't have a switched unordered with a
1911 SCTP_SNPRINTF(msg, sizeof(msg),
1912 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1919 * Its a complete segment. Lets validate we don't have a
1920 * re-assembly going on with the same Stream/Seq (for
1921 * ordered) or in the same Stream for unordered.
1923 if (control != NULL) {
1924 if (ordered || asoc->idata_supported) {
1925 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1927 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1930 if ((tsn == control->fsn_included + 1) &&
1931 (control->end_added == 0)) {
1932 SCTP_SNPRINTF(msg, sizeof(msg),
1933 "Illegal message sequence, missing end for MID: %8.8x",
1934 control->fsn_included);
1942 /* now do the tests */
1943 if (((asoc->cnt_on_all_streams +
1944 asoc->cnt_on_reasm_queue +
1945 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1946 (((int)asoc->my_rwnd) <= 0)) {
1948 * When we have NO room in the rwnd we check to make sure
1949 * the reader is doing its job...
1951 if (stcb->sctp_socket->so_rcv.sb_cc) {
1952 /* some to read, wake-up */
1953 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1955 /* now is it in the mapping array of what we have accepted? */
1956 if (chk_type == SCTP_DATA) {
1957 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1958 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1959 /* Nope not in the valid range dump it */
1961 sctp_set_rwnd(stcb, asoc);
1962 if ((asoc->cnt_on_all_streams +
1963 asoc->cnt_on_reasm_queue +
1964 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1965 SCTP_STAT_INCR(sctps_datadropchklmt);
1967 SCTP_STAT_INCR(sctps_datadroprwnd);
1973 if (control == NULL) {
1976 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1981 #ifdef SCTP_ASOCLOG_OF_TSNS
1982 SCTP_TCB_LOCK_ASSERT(stcb);
1983 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1984 asoc->tsn_in_at = 0;
1985 asoc->tsn_in_wrapped = 1;
1987 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1988 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1989 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1990 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1991 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1992 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1993 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1994 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1998 * Before we continue lets validate that we are not being fooled by
1999 * an evil attacker. We can only have Nk chunks based on our TSN
2000 * spread allowed by the mapping array N * 8 bits, so there is no
2001 * way our stream sequence numbers could have wrapped. We of course
2002 * only validate the FIRST fragment so the bit must be set.
2004 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2005 (TAILQ_EMPTY(&asoc->resetHead)) &&
2006 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2007 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2008 /* The incoming sseq is behind where we last delivered? */
2009 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2010 mid, asoc->strmin[sid].last_mid_delivered);
2012 if (asoc->idata_supported) {
2013 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2014 asoc->strmin[sid].last_mid_delivered,
2019 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2020 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2025 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2026 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2027 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2031 if (chk_type == SCTP_IDATA) {
2032 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2034 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2036 if (last_chunk == 0) {
2037 if (chk_type == SCTP_IDATA) {
2038 dmbuf = SCTP_M_COPYM(*m,
2039 (offset + sizeof(struct sctp_idata_chunk)),
2042 dmbuf = SCTP_M_COPYM(*m,
2043 (offset + sizeof(struct sctp_data_chunk)),
2046 #ifdef SCTP_MBUF_LOGGING
2047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2048 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2052 /* We can steal the last chunk */
2056 /* lop off the top part */
2057 if (chk_type == SCTP_IDATA) {
2058 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2060 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2062 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2063 l_len = SCTP_BUF_LEN(dmbuf);
2066 * need to count up the size hopefully does not hit
2072 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2073 l_len += SCTP_BUF_LEN(lat);
2076 if (l_len > the_len) {
2077 /* Trim the end round bytes off too */
2078 m_adj(dmbuf, -(l_len - the_len));
2081 if (dmbuf == NULL) {
2082 SCTP_STAT_INCR(sctps_nomem);
2086 * Now no matter what, we need a control, get one if we don't have
2087 * one (we may have gotten it above when we found the message was
2090 if (control == NULL) {
2091 sctp_alloc_a_readq(stcb, control);
2092 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2097 if (control == NULL) {
2098 SCTP_STAT_INCR(sctps_nomem);
2101 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2104 control->data = dmbuf;
2105 control->tail_mbuf = NULL;
2106 for (mm = control->data; mm; mm = mm->m_next) {
2107 control->length += SCTP_BUF_LEN(mm);
2108 if (SCTP_BUF_NEXT(mm) == NULL) {
2109 control->tail_mbuf = mm;
2112 control->end_added = 1;
2113 control->last_frag_seen = 1;
2114 control->first_frag_seen = 1;
2115 control->fsn_included = fsn;
2116 control->top_fsn = fsn;
2118 created_control = 1;
2120 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2121 chk_flags, ordered, mid, control);
2122 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2123 TAILQ_EMPTY(&asoc->resetHead) &&
2125 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2126 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2127 /* Candidate for express delivery */
2129 * Its not fragmented, No PD-API is up, Nothing in the
2130 * delivery queue, Its un-ordered OR ordered and the next to
2131 * deliver AND nothing else is stuck on the stream queue,
2132 * And there is room for it in the socket buffer. Lets just
2133 * stuff it up the buffer....
2135 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2136 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2137 asoc->highest_tsn_inside_nr_map = tsn;
2139 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2142 sctp_add_to_readq(stcb->sctp_ep, stcb,
2143 control, &stcb->sctp_socket->so_rcv,
2144 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2146 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2147 /* for ordered, bump what we delivered */
2148 asoc->strmin[sid].last_mid_delivered++;
2150 SCTP_STAT_INCR(sctps_recvexpress);
2151 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2152 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2153 SCTP_STR_LOG_FROM_EXPRS_DEL);
2156 goto finish_express_del;
2159 /* Now will we need a chunk too? */
2160 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2161 sctp_alloc_a_chunk(stcb, chk);
2163 /* No memory so we drop the chunk */
2164 SCTP_STAT_INCR(sctps_nomem);
2165 if (last_chunk == 0) {
2166 /* we copied it, free the copy */
2167 sctp_m_freem(dmbuf);
2171 chk->rec.data.tsn = tsn;
2172 chk->no_fr_allowed = 0;
2173 chk->rec.data.fsn = fsn;
2174 chk->rec.data.mid = mid;
2175 chk->rec.data.sid = sid;
2176 chk->rec.data.ppid = ppid;
2177 chk->rec.data.context = stcb->asoc.context;
2178 chk->rec.data.doing_fast_retransmit = 0;
2179 chk->rec.data.rcv_flags = chk_flags;
2181 chk->send_size = the_len;
2183 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2186 atomic_add_int(&net->ref_count, 1);
2189 /* Set the appropriate TSN mark */
2190 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2191 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2192 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2193 asoc->highest_tsn_inside_nr_map = tsn;
2196 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2197 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2198 asoc->highest_tsn_inside_map = tsn;
2201 /* Now is it complete (i.e. not fragmented)? */
2202 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2204 * Special check for when streams are resetting. We could be
2205 * more smart about this and check the actual stream to see
2206 * if it is not being reset.. that way we would not create a
2207 * HOLB when amongst streams being reset and those not being
2211 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2212 SCTP_TSN_GT(tsn, liste->tsn)) {
2214 * yep its past where we need to reset... go ahead
2217 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2219 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2221 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2222 unsigned char inserted = 0;
2224 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2225 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2230 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2235 if (inserted == 0) {
2237 * must be put at end, use prevP
2238 * (all setup from loop) to setup
2241 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2244 goto finish_express_del;
2246 if (chk_flags & SCTP_DATA_UNORDERED) {
2247 /* queue directly into socket buffer */
2248 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2250 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2251 sctp_add_to_readq(stcb->sctp_ep, stcb,
2253 &stcb->sctp_socket->so_rcv, 1,
2254 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2257 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2259 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2267 goto finish_express_del;
2269 /* If we reach here its a reassembly */
2270 need_reasm_check = 1;
2271 SCTPDBG(SCTP_DEBUG_XXX,
2272 "Queue data to stream for reasm control: %p MID: %u\n",
2274 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2277 * the assoc is now gone and chk was put onto the reasm
2278 * queue, which has all been freed.
2286 /* Here we tidy up things */
2287 if (tsn == (asoc->cumulative_tsn + 1)) {
2288 /* Update cum-ack */
2289 asoc->cumulative_tsn = tsn;
2295 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2297 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2299 SCTP_STAT_INCR(sctps_recvdata);
2300 /* Set it present please */
2301 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2302 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2304 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2305 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2306 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2308 if (need_reasm_check) {
2309 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2310 need_reasm_check = 0;
2312 /* check the special flag for stream resets */
2313 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2314 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2316 * we have finished working through the backlogged TSN's now
2317 * time to reset streams. 1: call reset function. 2: free
2318 * pending_reply space 3: distribute any chunks in
2319 * pending_reply_queue.
2321 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2322 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2323 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2324 SCTP_FREE(liste, SCTP_M_STRESET);
2325 /* sa_ignore FREED_MEMORY */
2326 liste = TAILQ_FIRST(&asoc->resetHead);
2327 if (TAILQ_EMPTY(&asoc->resetHead)) {
2328 /* All can be removed */
2329 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2330 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2331 strm = &asoc->strmin[control->sinfo_stream];
2332 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2336 if (need_reasm_check) {
2337 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2338 need_reasm_check = 0;
2342 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2343 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2347 * if control->sinfo_tsn is <= liste->tsn we
2348 * can process it which is the NOT of
2349 * control->sinfo_tsn > liste->tsn
2351 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2352 strm = &asoc->strmin[control->sinfo_stream];
2353 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2357 if (need_reasm_check) {
2358 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2359 need_reasm_check = 0;
/*
 * sctp_map_lookup_tab[v] is the number of consecutive one-bits in the
 * byte v counted from the least-significant bit (0x00 -> 0, 0x01 -> 1,
 * 0x03 -> 2, ..., 0xff -> 8).  sctp_slide_mapping_arrays() uses it to
 * advance its cumulative-TSN scan past the acked bits of a partially
 * filled mapping-array byte without testing each bit individually.
 */
2367 static const int8_t sctp_map_lookup_tab[256] = {
2368 0, 1, 0, 2, 0, 1, 0, 3,
2369 0, 1, 0, 2, 0, 1, 0, 4,
2370 0, 1, 0, 2, 0, 1, 0, 3,
2371 0, 1, 0, 2, 0, 1, 0, 5,
2372 0, 1, 0, 2, 0, 1, 0, 3,
2373 0, 1, 0, 2, 0, 1, 0, 4,
2374 0, 1, 0, 2, 0, 1, 0, 3,
2375 0, 1, 0, 2, 0, 1, 0, 6,
2376 0, 1, 0, 2, 0, 1, 0, 3,
2377 0, 1, 0, 2, 0, 1, 0, 4,
2378 0, 1, 0, 2, 0, 1, 0, 3,
2379 0, 1, 0, 2, 0, 1, 0, 5,
2380 0, 1, 0, 2, 0, 1, 0, 3,
2381 0, 1, 0, 2, 0, 1, 0, 4,
2382 0, 1, 0, 2, 0, 1, 0, 3,
2383 0, 1, 0, 2, 0, 1, 0, 7,
2384 0, 1, 0, 2, 0, 1, 0, 3,
2385 0, 1, 0, 2, 0, 1, 0, 4,
2386 0, 1, 0, 2, 0, 1, 0, 3,
2387 0, 1, 0, 2, 0, 1, 0, 5,
2388 0, 1, 0, 2, 0, 1, 0, 3,
2389 0, 1, 0, 2, 0, 1, 0, 4,
2390 0, 1, 0, 2, 0, 1, 0, 3,
2391 0, 1, 0, 2, 0, 1, 0, 6,
2392 0, 1, 0, 2, 0, 1, 0, 3,
2393 0, 1, 0, 2, 0, 1, 0, 4,
2394 0, 1, 0, 2, 0, 1, 0, 3,
2395 0, 1, 0, 2, 0, 1, 0, 5,
2396 0, 1, 0, 2, 0, 1, 0, 3,
2397 0, 1, 0, 2, 0, 1, 0, 4,
2398 0, 1, 0, 2, 0, 1, 0, 3,
2399 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Recompute the cumulative TSN from the OR of mapping_array and
 * nr_mapping_array, then reclaim array space: either clear both arrays
 * outright when everything up to the highest TSN is acked, or shift
 * their contents down by whole bytes so mapping_array_base_tsn moves
 * forward.  Called with the TCB locked (assumed from callers; the
 * surrounding lock assertions are not visible in this excerpt).
 */
2404 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2407 * Now we also need to check the mapping array in a couple of ways.
2408 * 1) Did we move the cum-ack point?
2410 * When you first glance at this you might think that all entries
2411 * that make up the position of the cum-ack would be in the
2412 * nr-mapping array only.. i.e. things up to the cum-ack are always
2413 * deliverable. Thats true with one exception, when its a fragmented
2414 * message we may not deliver the data until some threshold (or all
2415 * of it) is in place. So we must OR the nr_mapping_array and
2416 * mapping_array to get a true picture of the cum-ack.
2418 struct sctp_association *asoc;
2421 int slide_from, slide_end, lgap, distance;
2422 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for the SCTP_MAP_PREPARE_SLIDE log record. */
2426 old_cumack = asoc->cumulative_tsn;
2427 old_base = asoc->mapping_array_base_tsn;
2428 old_highest = asoc->highest_tsn_inside_map;
2430 * We could probably improve this a small bit by calculating the
2431 * offset of the current cum-ack as the starting point.
/*
 * Walk the byte-wise OR of the two maps; 'at' accumulates the count of
 * contiguous acked bits from the base, stopping at the first clear bit.
 */
2434 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2435 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2439 /* there is a 0 bit */
/* sctp_map_lookup_tab gives the trailing-ones count of this byte. */
2440 at += sctp_map_lookup_tab[val];
2444 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity check: the recomputed cum-ack must not pass both highest-TSN markers. */
2446 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2447 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2449 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2450 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2452 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2453 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2454 sctp_print_mapping_array(asoc);
2455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2456 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery: clamp both highest-TSN markers to the cum-ack. */
2458 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2459 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2462 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2463 highest_tsn = asoc->highest_tsn_inside_nr_map;
2465 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything received so far is acked (and at least one whole
 * byte is consumed) -- wipe the arrays and restart the base just past
 * the cum-ack.
 */
2467 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2468 /* The complete array was completed by a single FR */
2469 /* highest becomes the cum-ack */
2475 /* clear the array */
/* Number of bytes covering 'at' bits, rounded up, capped at array size. */
2476 clr = ((at + 7) >> 3);
2477 if (clr > asoc->mapping_array_size) {
2478 clr = asoc->mapping_array_size;
2480 memset(asoc->mapping_array, 0, clr);
2481 memset(asoc->nr_mapping_array, 0, clr);
/* Debug sweep: both arrays should be all-zero after the clear. */
2483 for (i = 0; i < asoc->mapping_array_size; i++) {
2484 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2485 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2486 sctp_print_mapping_array(asoc);
2490 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2491 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Case 2: at least one whole byte of acked bits at the front -- slide
 * the arrays down by 'slide_from' bytes instead of clearing them.
 */
2492 } else if (at >= 8) {
2493 /* we can slide the mapping array down */
2494 /* slide_from holds where we hit the first NON 0xff byte */
2497 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn from the base; slide_end = its byte index. */
2500 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2501 slide_end = (lgap >> 3);
2502 if (slide_end < slide_from) {
2503 sctp_print_mapping_array(asoc);
2505 panic("impossible slide");
2507 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2508 lgap, slide_end, slide_from, at);
2512 if (slide_end > asoc->mapping_array_size) {
2514 panic("would overrun buffer");
2516 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2517 asoc->mapping_array_size, slide_end);
2518 slide_end = asoc->mapping_array_size;
2521 distance = (slide_end - slide_from) + 1;
2522 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2523 sctp_log_map(old_base, old_cumack, old_highest,
2524 SCTP_MAP_PREPARE_SLIDE);
2525 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2526 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
/* If the live region would not fit after the move, skip the slide entirely. */
2528 if (distance + slide_from > asoc->mapping_array_size ||
2531 * Here we do NOT slide forward the array so that
2532 * hopefully when more data comes in to fill it up
2533 * we will be able to slide it forward. Really I
2534 * don't think this should happen :-0
2537 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2538 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2539 (uint32_t)asoc->mapping_array_size,
2540 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down to the front of both arrays... */
2545 for (ii = 0; ii < distance; ii++) {
2546 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2547 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ...and zero the vacated tail. */
2550 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2551 asoc->mapping_array[ii] = 0;
2552 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "empty map" invariant (highest == base - 1) intact when a
 * map held no TSNs, since the base is about to advance by slide_from*8.
 */
2554 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2555 asoc->highest_tsn_inside_map += (slide_from << 3);
2557 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2558 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2560 asoc->mapping_array_base_tsn += (slide_from << 3);
2561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2562 sctp_log_map(asoc->mapping_array_base_tsn,
2563 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2564 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide how to acknowledge newly received data: slide the mapping
 * arrays, then either (a) in SHUTDOWN-SENT state stop the delayed-ack
 * timer and emit SHUTDOWN + SACK immediately, (b) send a SACK now when
 * one is required (pending send_sack, a gap opened or closed, dup TSNs,
 * delayed ack disabled, or the sack_freq packet limit was hit) -- with
 * a CMT/DAC exception that delays gap-only acks -- or (c) just start
 * the delayed-ack (RECV) timer.
 *
 * was_a_gap: nonzero if a gap existed before this packet was processed.
 */
2571 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2573 struct sctp_association *asoc;
2574 uint32_t highest_tsn;
2577 sctp_slide_mapping_arrays(stcb);
/* Highest TSN seen is the max across the renegable and non-renegable maps. */
2579 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2580 highest_tsn = asoc->highest_tsn_inside_nr_map;
2582 highest_tsn = asoc->highest_tsn_inside_map;
2584 /* Is there a gap now? */
2585 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2588 * Now we need to see if we need to queue a sack or just start the
2589 * timer (if allowed).
2591 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2593 * Ok special case, in SHUTDOWN-SENT case. here we maker
2594 * sure SACK timer is off and instead send a SHUTDOWN and a
2597 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2598 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2599 stcb->sctp_ep, stcb, NULL,
2600 SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
/* Re-send SHUTDOWN to the alternate path if one is set, else the primary. */
2602 sctp_send_shutdown(stcb,
2603 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2605 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2609 * CMT DAC algorithm: increase number of packets received
2612 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any of these conditions forces an immediate SACK decision. */
2614 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2616 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2618 (stcb->asoc.numduptsns) || /* we have dup's */
2619 (is_a_gap) || /* is still a gap */
2620 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2621 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * CMT/DAC exception: a gap-only trigger (no pending send_sack, no
 * dups, delayed ack enabled, timer idle) is delayed instead of acked.
 */
2624 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2625 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2626 (stcb->asoc.send_sack == 0) &&
2627 (stcb->asoc.numduptsns == 0) &&
2628 (stcb->asoc.delayed_ack) &&
2629 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2632 * CMT DAC algorithm: With CMT, delay acks
2633 * even in the face of
2635 * reordering. Therefore, if acks that do
2636 * not have to be sent because of the above
2637 * reasons, will be delayed. That is, acks
2638 * that would have been sent due to gap
2639 * reports will be delayed with DAC. Start
2640 * the delayed ack timer.
2642 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2643 stcb->sctp_ep, stcb, NULL);
2646 * Ok we must build a SACK since the timer
2647 * is pending, we got our first packet OR
2648 * there are gaps or duplicates.
2650 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2651 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2652 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* No immediate SACK required: arm the delayed-ack timer if idle. */
2655 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2656 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2657 stcb->sctp_ep, stcb, NULL);
/*
 * Walk the DATA region of an inbound SCTP packet, dispatching each
 * DATA/I-DATA chunk to sctp_process_a_data_chunk() and policing every
 * non-data chunk type found there (control chunks out of place are a
 * protocol violation; unknown types follow the RFC 4960 upper-two-bit
 * rules).  On exit, updates receive-liveness bookkeeping and calls
 * sctp_sack_check() to schedule or send the acknowledgment.
 *
 * mm/offset/length: packet mbuf chain and the window of it to parse
 *   (*offset is advanced chunk by chunk); iphlen: IP header length,
 *   used when reporting rwnd-overrun drops; high_tsn: out-param,
 *   seeded with the cum-ack and raised by the per-chunk processor.
 * NOTE(review): the return statements/value are elided from this
 * excerpt; abort_flag set by callees presumably short-circuits --
 * confirm against the full source.
 */
2664 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2665 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2666 struct sctp_nets *net, uint32_t *high_tsn)
2668 struct sctp_chunkhdr *ch, chunk_buf;
2669 struct sctp_association *asoc;
2670 int num_chunks = 0; /* number of control chunks processed */
2672 int break_flag, last_chunk;
2673 int abort_flag = 0, was_a_gap;
2675 uint32_t highest_tsn;
2676 uint16_t chk_length;
2679 sctp_set_rwnd(stcb, &stcb->asoc);
2682 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap exists BEFORE processing, for sctp_sack_check(). */
2684 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2685 highest_tsn = asoc->highest_tsn_inside_nr_map;
2687 highest_tsn = asoc->highest_tsn_inside_map;
2689 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2691 * setup where we got the last DATA packet from for any SACK that
2692 * may need to go out. Don't bump the net. This is done ONLY when a
2693 * chunk is assigned.
2695 asoc->last_data_chunk_from = net;
2698 * Now before we proceed we must figure out if this is a wasted
2699 * cluster... i.e. it is a small packet sent in and yet the driver
2700 * underneath allocated a full cluster for it. If so we must copy it
2701 * to a smaller mbuf and free up the cluster mbuf. This will help
2702 * with cluster starvation.
2704 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2705 /* we only handle mbufs that are singletons.. not chains */
2706 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2708 /* ok lets see if we can copy the data up */
2711 /* get the pointers and copy */
2712 to = mtod(m, caddr_t *);
2713 from = mtod((*mm), caddr_t *);
2714 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2715 /* copy the length and free up the old */
2716 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2718 /* success, back copy */
2721 /* We are in trouble in the mbuf world .. yikes */
2725 /* get pointer to the first chunk header */
2726 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2727 sizeof(struct sctp_chunkhdr),
2728 (uint8_t *)&chunk_buf);
2733 * process all DATA chunks...
2735 *high_tsn = asoc->cumulative_tsn;
2737 asoc->data_pkts_seen++;
2738 while (stop_proc == 0) {
2739 /* validate chunk length */
2740 chk_length = ntohs(ch->chunk_length);
2741 if (length - *offset < chk_length) {
2742 /* all done, mutulated chunk */
/* Plain DATA after I-DATA was negotiated is a protocol violation: abort. */
2746 if ((asoc->idata_supported == 1) &&
2747 (ch->chunk_type == SCTP_DATA)) {
2748 struct mbuf *op_err;
2749 char msg[SCTP_DIAG_INFO_LEN];
2751 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2752 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2753 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2754 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Conversely, I-DATA without negotiation also aborts the association. */
2757 if ((asoc->idata_supported == 0) &&
2758 (ch->chunk_type == SCTP_IDATA)) {
2759 struct mbuf *op_err;
2760 char msg[SCTP_DIAG_INFO_LEN];
2762 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2763 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2764 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2765 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2768 if ((ch->chunk_type == SCTP_DATA) ||
2769 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum header size differs between DATA and I-DATA chunks. */
2772 if (ch->chunk_type == SCTP_DATA) {
2773 clen = sizeof(struct sctp_data_chunk);
2775 clen = sizeof(struct sctp_idata_chunk);
2777 if (chk_length < clen) {
2779 * Need to send an abort since we had a
2780 * invalid data chunk.
2782 struct mbuf *op_err;
2783 char msg[SCTP_DIAG_INFO_LEN];
2785 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2786 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2788 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2789 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2790 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2793 #ifdef SCTP_AUDITING_ENABLED
2794 sctp_audit_log(0xB1, 0);
/* Is this the last chunk in the packet (padded length reaches the end)? */
2796 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2801 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2802 chk_length, net, high_tsn, &abort_flag, &break_flag,
2803 last_chunk, ch->chunk_type)) {
2811 * Set because of out of rwnd space and no
2812 * drop rep space left.
2818 /* not a data chunk in the data region */
2819 switch (ch->chunk_type) {
2820 case SCTP_INITIATION:
2821 case SCTP_INITIATION_ACK:
2822 case SCTP_SELECTIVE_ACK:
2823 case SCTP_NR_SELECTIVE_ACK:
2824 case SCTP_HEARTBEAT_REQUEST:
2825 case SCTP_HEARTBEAT_ACK:
2826 case SCTP_ABORT_ASSOCIATION:
2828 case SCTP_SHUTDOWN_ACK:
2829 case SCTP_OPERATION_ERROR:
2830 case SCTP_COOKIE_ECHO:
2831 case SCTP_COOKIE_ACK:
2834 case SCTP_SHUTDOWN_COMPLETE:
2835 case SCTP_AUTHENTICATION:
2836 case SCTP_ASCONF_ACK:
2837 case SCTP_PACKET_DROPPED:
2838 case SCTP_STREAM_RESET:
2839 case SCTP_FORWARD_CUM_TSN:
2843 * Now, what do we do with KNOWN
2844 * chunks that are NOT in the right
2847 * For now, I do nothing but ignore
2848 * them. We may later want to add
2849 * sysctl stuff to switch out and do
2850 * either an ABORT() or possibly
2853 struct mbuf *op_err;
2854 char msg[SCTP_DIAG_INFO_LEN];
2856 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2858 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2859 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2864 * Unknown chunk type: use bit rules after
2867 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2869 * Need to send an abort since we
2870 * had a invalid chunk.
2872 struct mbuf *op_err;
2873 char msg[SCTP_DIAG_INFO_LEN];
2875 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2876 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2877 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2878 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Bit 0x40 of an unknown type: report it with an UNRECOG_CHUNK error cause. */
2881 if (ch->chunk_type & 0x40) {
2882 /* Add a error report to the queue */
2883 struct mbuf *op_err;
2884 struct sctp_gen_error_cause *cause;
2886 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2887 0, M_NOWAIT, 1, MT_DATA);
2888 if (op_err != NULL) {
2889 cause = mtod(op_err, struct sctp_gen_error_cause *);
2890 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2891 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2892 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2893 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2894 if (SCTP_BUF_NEXT(op_err) != NULL) {
2895 sctp_queue_op_err(stcb, op_err);
/* Copy of the offending chunk failed: free the partial error report. */
2897 sctp_m_freem(op_err);
/* Bit 0x80 clear: stop processing the rest of the packet. */
2901 if ((ch->chunk_type & 0x80) == 0) {
2902 /* discard the rest of this packet */
2904 } /* else skip this bad chunk and
2907 } /* switch of chunk type */
2909 *offset += SCTP_SIZE32(chk_length);
2910 if ((*offset >= length) || stop_proc) {
2911 /* no more data left in the mbuf chain */
2915 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2916 sizeof(struct sctp_chunkhdr),
2917 (uint8_t *)&chunk_buf);
2926 * we need to report rwnd overrun drops.
2928 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2932 * Did we get data, if so update the time for auto-close and
2933 * give peer credit for being alive.
2935 SCTP_STAT_INCR(sctps_recvpktwithdata);
2936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2937 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2938 stcb->asoc.overall_error_count,
2940 SCTP_FROM_SCTP_INDATA,
/* Valid data from the peer resets the association error counter. */
2943 stcb->asoc.overall_error_count = 0;
2944 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2946 /* now service all of the reassm queue if needed */
2947 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2948 /* Assure that we ack right away */
2949 stcb->asoc.send_sack = 1;
2951 /* Start a sack timer or QUEUE a SACK for sending */
2952 sctp_sack_check(stcb, was_a_gap);
/*
 * Mark the sent-queue chunks covered by one SACK Gap Ack Block
 * [last_tsn + frag_strt, last_tsn + frag_end].  For each newly acked
 * TSN: update CMT pseudo-cumack / SFR / DAC bookkeeping, take it out
 * of flight (RTO sample from the first ack of a non-retransmitted
 * chunk), and set it to MARKED -- or, for a non-renegable NR-SACK
 * block (nr_sacking), to NR_ACKED, releasing the data and stream
 * accounting immediately.
 *
 * p_tp1: in/out cursor into the sent queue so successive blocks resume
 *   where the previous one stopped; the queue may be re-walked once
 *   from the head ('circled') when blocks arrive out of order.
 * Returns wake_him (nonzero when NR-ACKED chunks freed data, so the
 * caller should wake the sending socket).
 */
2957 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2958 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2960 uint32_t *biggest_newly_acked_tsn,
2961 uint32_t *this_sack_lowest_newack,
2964 struct sctp_tmit_chunk *tp1;
2965 unsigned int theTSN;
2966 int j, wake_him = 0, circled = 0;
2968 /* Recover the tp1 we last saw */
2971 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Walk every TSN in the block; offsets are relative to the cum-ack. */
2973 for (j = frag_strt; j <= frag_end; j++) {
2974 theTSN = j + last_tsn;
2976 if (tp1->rec.data.doing_fast_retransmit)
2980 * CMT: CUCv2 algorithm. For each TSN being
2981 * processed from the sent queue, track the
2982 * next expected pseudo-cumack, or
2983 * rtx_pseudo_cumack, if required. Separate
2984 * cumack trackers for first transmissions,
2985 * and retransmissions.
2987 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2988 (tp1->whoTo->find_pseudo_cumack == 1) &&
2989 (tp1->snd_count == 1)) {
2990 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2991 tp1->whoTo->find_pseudo_cumack = 0;
2993 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2994 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2995 (tp1->snd_count > 1)) {
2996 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2997 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk for this TSN of the block. */
2999 if (tp1->rec.data.tsn == theTSN) {
3000 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3002 * must be held until
/* sent < RESEND means this is the first ack we see for the chunk. */
3005 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3007 * If it is less than RESEND, it is
3008 * now no-longer in flight.
3009 * Higher values may already be set
3010 * via previous Gap Ack Blocks...
3011 * i.e. ACKED or RESEND.
3013 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3014 *biggest_newly_acked_tsn)) {
3015 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3018 * CMT: SFR algo (and HTNA) - set
3019 * saw_newack to 1 for dest being
3020 * newly acked. update
3021 * this_sack_highest_newack if
3024 if (tp1->rec.data.chunk_was_revoked == 0)
3025 tp1->whoTo->saw_newack = 1;
3027 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3028 tp1->whoTo->this_sack_highest_newack)) {
3029 tp1->whoTo->this_sack_highest_newack =
3033 * CMT DAC algo: also update
3034 * this_sack_lowest_newack
3036 if (*this_sack_lowest_newack == 0) {
3037 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3038 sctp_log_sack(*this_sack_lowest_newack,
3043 SCTP_LOG_TSN_ACKED);
3045 *this_sack_lowest_newack = tp1->rec.data.tsn;
3048 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3049 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3050 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3051 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3052 * Separate pseudo_cumack trackers for first transmissions and
3055 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3056 if (tp1->rec.data.chunk_was_revoked == 0) {
3057 tp1->whoTo->new_pseudo_cumack = 1;
3059 tp1->whoTo->find_pseudo_cumack = 1;
3061 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3062 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3064 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3065 if (tp1->rec.data.chunk_was_revoked == 0) {
3066 tp1->whoTo->new_pseudo_cumack = 1;
3068 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3070 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3071 sctp_log_sack(*biggest_newly_acked_tsn,
3076 SCTP_LOG_TSN_ACKED);
3078 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3079 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3080 tp1->whoTo->flight_size,
3082 (uint32_t)(uintptr_t)tp1->whoTo,
/* Remove the newly acked chunk from the per-net and total flight sizes. */
3085 sctp_flight_size_decrease(tp1);
3086 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3087 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3090 sctp_total_flight_decrease(stcb, tp1);
/* net_ack feeds the cwnd update; net_ack2 only counts first transmissions. */
3092 tp1->whoTo->net_ack += tp1->send_size;
3093 if (tp1->snd_count < 2) {
3095 * True non-retransmitted chunk
3097 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample from the send timestamp (only valid per Karn's rule, handled
 * by the elided surrounding checks). */
3104 sctp_calculate_rto(stcb,
3107 &tp1->sent_rcv_time,
3108 SCTP_RTT_FROM_DATA)) {
3111 if (tp1->whoTo->rto_needed == 0) {
3112 tp1->whoTo->rto_needed = 1;
3119 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3120 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3121 stcb->asoc.this_sack_highest_gap)) {
3122 stcb->asoc.this_sack_highest_gap =
/* A pending retransmission that got acked leaves the retran count. */
3125 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3126 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3127 #ifdef SCTP_AUDITING_ENABLED
3128 sctp_audit_log(0xB2,
3129 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3134 * All chunks NOT UNSENT fall through here and are marked
3135 * (leave PR-SCTP ones that are to skip alone though)
3137 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3138 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3139 tp1->sent = SCTP_DATAGRAM_MARKED;
3141 if (tp1->rec.data.chunk_was_revoked) {
3142 /* deflate the cwnd */
3143 tp1->whoTo->cwnd -= tp1->book_size;
3144 tp1->rec.data.chunk_was_revoked = 0;
3146 /* NR Sack code here */
/* Non-renegable ack: release stream accounting and the chunk data now. */
3148 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3149 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3150 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3153 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* Last chunk of a stream pending reset: let the reset proceed. */
3156 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3157 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3158 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3159 stcb->asoc.trigger_reset = 1;
3161 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3167 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3168 sctp_m_freem(tp1->data);
3175 } /* if (tp1->tsn == theTSN) */
/* Sent queue is TSN-ordered: past theTSN means this TSN isn't queued. */
3176 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3179 tp1 = TAILQ_NEXT(tp1, sctp_next);
3180 if ((tp1 == NULL) && (circled == 0)) {
3182 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3184 } /* end while (tp1) */
3187 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3189 /* In case the fragments were not in order we must reset */
3190 } /* end for (j = fragStart */
3192 return (wake_him); /* Return value only used for nr-sack */
/*
 * Parse the num_seg Gap Ack Blocks followed by num_nr_seg NR-Gap Ack
 * Blocks of a (NR-)SACK chunk from the mbuf chain at *offset, validate
 * each block (start <= end; out-of-order blocks restart the sent-queue
 * cursor from the head), and hand each one to
 * sctp_process_segment_range().  Updates *biggest_tsn_acked and the
 * newly-acked trackers as a side effect.
 * Returns chunk_freed: nonzero when an NR block caused chunk data to
 * be freed (caller wakes the sending socket).
 */
3197 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3198 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3199 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3200 int num_seg, int num_nr_seg, int *rto_ok)
3202 struct sctp_gap_ack_block *frag, block;
3203 struct sctp_tmit_chunk *tp1;
3208 uint16_t frag_strt, frag_end, prev_frag_end;
3210 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* First num_seg iterations are renegable blocks; the rest are NR blocks. */
3214 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3217 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3219 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3220 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3221 *offset += sizeof(block);
/* Truncated chunk: stop parsing and report what was processed so far. */
3223 return (chunk_freed);
3225 frag_strt = ntohs(frag->start);
3226 frag_end = ntohs(frag->end);
3228 if (frag_strt > frag_end) {
3229 /* This gap report is malformed, skip it. */
3232 if (frag_strt <= prev_frag_end) {
3233 /* This gap report is not in order, so restart. */
3234 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Block offsets are relative to the SACK's cumulative TSN (last_tsn). */
3236 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3237 *biggest_tsn_acked = last_tsn + frag_end;
3244 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3245 non_revocable, &num_frs, biggest_newly_acked_tsn,
3246 this_sack_lowest_newack, rto_ok)) {
3249 prev_frag_end = frag_end;
3251 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3253 sctp_log_fr(*biggest_tsn_acked,
3254 *biggest_newly_acked_tsn,
3255 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3257 return (chunk_freed);
/*
 * Detect ack revocation after a SACK has been fully processed: any
 * sent-queue chunk above the cum-ack that is still ACKED was gap-acked
 * by an earlier SACK but NOT by this one -- the receiver renege'd, so
 * the chunk goes back to SENT, re-enters the flight size, and the cwnd
 * is inflated by its book_size to compensate.  Chunks this SACK did
 * cover (MARKED) are promoted to ACKED for the next round.
 *
 * cumack: this SACK's cumulative TSN ack;
 * biggest_tsn_acked: highest TSN covered by this SACK's gap blocks --
 *   chunks above it were never claimed acked and are left alone.
 */
3261 sctp_check_for_revoked(struct sctp_tcb *stcb,
3262 struct sctp_association *asoc, uint32_t cumack,
3263 uint32_t biggest_tsn_acked)
3265 struct sctp_tmit_chunk *tp1;
3267 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3268 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3270 * ok this guy is either ACK or MARKED. If it is
3271 * ACKED it has been previously acked but not this
3272 * time i.e. revoked. If it is MARKED it was ACK'ed
3275 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3278 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3279 /* it has been revoked */
3280 tp1->sent = SCTP_DATAGRAM_SENT;
3281 tp1->rec.data.chunk_was_revoked = 1;
3283 * We must add this stuff back in to assure
3284 * timers and such get started.
3286 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3287 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3288 tp1->whoTo->flight_size,
3290 (uint32_t)(uintptr_t)tp1->whoTo,
/* The revoked chunk counts as outstanding again on its path. */
3293 sctp_flight_size_increase(tp1);
3294 sctp_total_flight_increase(stcb, tp1);
3296 * We inflate the cwnd to compensate for our
3297 * artificial inflation of the flight_size.
3299 tp1->whoTo->cwnd += tp1->book_size;
3300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3301 sctp_log_sack(asoc->last_acked_seq,
3306 SCTP_LOG_TSN_REVOKED);
3308 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3309 /* it has been re-acked in this SACK */
3310 tp1->sent = SCTP_DATAGRAM_ACKED;
/* Queue is TSN-ordered; an UNSENT chunk means nothing further to check. */
3313 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3320 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3321 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3323 struct sctp_tmit_chunk *tp1;
3324 int strike_flag = 0;
3326 int tot_retrans = 0;
3327 uint32_t sending_seq;
3328 struct sctp_nets *net;
3329 int num_dests_sacked = 0;
3332 * select the sending_seq, this is either the next thing ready to be
3333 * sent but not transmitted, OR, the next seq we assign.
3335 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3337 sending_seq = asoc->sending_seq;
3339 sending_seq = tp1->rec.data.tsn;
3342 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3343 if ((asoc->sctp_cmt_on_off > 0) &&
3344 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3345 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3346 if (net->saw_newack)
3350 if (stcb->asoc.prsctp_supported) {
3351 (void)SCTP_GETTIME_TIMEVAL(&now);
3353 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3355 if (tp1->no_fr_allowed) {
3356 /* this one had a timeout or something */
3359 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3360 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3361 sctp_log_fr(biggest_tsn_newly_acked,
3364 SCTP_FR_LOG_CHECK_STRIKE);
3366 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3367 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3371 if (stcb->asoc.prsctp_supported) {
3372 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3373 /* Is it expired? */
3374 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3375 /* Yes so drop it */
3376 if (tp1->data != NULL) {
3377 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3378 SCTP_SO_NOT_LOCKED);
3385 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3386 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3387 /* we are beyond the tsn in the sack */
3390 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3391 /* either a RESEND, ACKED, or MARKED */
3393 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3394 /* Continue strikin FWD-TSN chunks */
3395 tp1->rec.data.fwd_tsn_cnt++;
3400 * CMT : SFR algo (covers part of DAC and HTNA as well)
3402 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3404 * No new acks were receieved for data sent to this
3405 * dest. Therefore, according to the SFR algo for
3406 * CMT, no data sent to this dest can be marked for
3407 * FR using this SACK.
3410 } else if (tp1->whoTo &&
3411 SCTP_TSN_GT(tp1->rec.data.tsn,
3412 tp1->whoTo->this_sack_highest_newack) &&
3413 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3415 * CMT: New acks were receieved for data sent to
3416 * this dest. But no new acks were seen for data
3417 * sent after tp1. Therefore, according to the SFR
3418 * algo for CMT, tp1 cannot be marked for FR using
3419 * this SACK. This step covers part of the DAC algo
3420 * and the HTNA algo as well.
3425 * Here we check to see if we were have already done a FR
3426 * and if so we see if the biggest TSN we saw in the sack is
3427 * smaller than the recovery point. If so we don't strike
3428 * the tsn... otherwise we CAN strike the TSN.
3431 * @@@ JRI: Check for CMT if (accum_moved &&
3432 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3435 if (accum_moved && asoc->fast_retran_loss_recovery) {
3437 * Strike the TSN if in fast-recovery and cum-ack
3440 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3441 sctp_log_fr(biggest_tsn_newly_acked,
3444 SCTP_FR_LOG_STRIKE_CHUNK);
3446 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3449 if ((asoc->sctp_cmt_on_off > 0) &&
3450 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3452 * CMT DAC algorithm: If SACK flag is set to
3453 * 0, then lowest_newack test will not pass
3454 * because it would have been set to the
3455 * cumack earlier. If not already to be
3456 * rtx'd, If not a mixed sack and if tp1 is
3457 * not between two sacked TSNs, then mark by
3458 * one more. NOTE that we are marking by one
3459 * additional time since the SACK DAC flag
3460 * indicates that two packets have been
3461 * received after this missing TSN.
3463 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3464 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3465 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3466 sctp_log_fr(16 + num_dests_sacked,
3469 SCTP_FR_LOG_STRIKE_CHUNK);
3474 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3475 (asoc->sctp_cmt_on_off == 0)) {
3477 * For those that have done a FR we must take
3478 * special consideration if we strike. I.e the
3479 * biggest_newly_acked must be higher than the
3480 * sending_seq at the time we did the FR.
3483 #ifdef SCTP_FR_TO_ALTERNATE
3485 * If FR's go to new networks, then we must only do
3486 * this for singly homed asoc's. However if the FR's
3487 * go to the same network (Armando's work) then its
3488 * ok to FR multiple times.
3496 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3497 tp1->rec.data.fast_retran_tsn)) {
3499 * Strike the TSN, since this ack is
3500 * beyond where things were when we
3503 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3504 sctp_log_fr(biggest_tsn_newly_acked,
3507 SCTP_FR_LOG_STRIKE_CHUNK);
3509 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3513 if ((asoc->sctp_cmt_on_off > 0) &&
3514 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3516 * CMT DAC algorithm: If
3517 * SACK flag is set to 0,
3518 * then lowest_newack test
3519 * will not pass because it
3520 * would have been set to
3521 * the cumack earlier. If
3522 * not already to be rtx'd,
3523 * If not a mixed sack and
3524 * if tp1 is not between two
3525 * sacked TSNs, then mark by
3526 * one more. NOTE that we
3527 * are marking by one
3528 * additional time since the
3529 * SACK DAC flag indicates
3530 * that two packets have
3531 * been received after this
3534 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3535 (num_dests_sacked == 1) &&
3536 SCTP_TSN_GT(this_sack_lowest_newack,
3537 tp1->rec.data.tsn)) {
3538 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3539 sctp_log_fr(32 + num_dests_sacked,
3542 SCTP_FR_LOG_STRIKE_CHUNK);
3544 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3552 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3555 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3556 biggest_tsn_newly_acked)) {
3558 * We don't strike these: This is the HTNA
3559 * algorithm i.e. we don't strike If our TSN is
3560 * larger than the Highest TSN Newly Acked.
3564 /* Strike the TSN */
3565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3566 sctp_log_fr(biggest_tsn_newly_acked,
3569 SCTP_FR_LOG_STRIKE_CHUNK);
3571 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3574 if ((asoc->sctp_cmt_on_off > 0) &&
3575 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3577 * CMT DAC algorithm: If SACK flag is set to
3578 * 0, then lowest_newack test will not pass
3579 * because it would have been set to the
3580 * cumack earlier. If not already to be
3581 * rtx'd, If not a mixed sack and if tp1 is
3582 * not between two sacked TSNs, then mark by
3583 * one more. NOTE that we are marking by one
3584 * additional time since the SACK DAC flag
3585 * indicates that two packets have been
3586 * received after this missing TSN.
3588 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3589 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3590 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3591 sctp_log_fr(48 + num_dests_sacked,
3594 SCTP_FR_LOG_STRIKE_CHUNK);
3600 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3601 struct sctp_nets *alt;
3603 /* fix counts and things */
3604 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3605 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3606 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3608 (uint32_t)(uintptr_t)tp1->whoTo,
3612 tp1->whoTo->net_ack++;
3613 sctp_flight_size_decrease(tp1);
3614 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3615 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3620 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3621 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3622 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3624 /* add back to the rwnd */
3625 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3627 /* remove from the total flight */
3628 sctp_total_flight_decrease(stcb, tp1);
3630 if ((stcb->asoc.prsctp_supported) &&
3631 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3633 * Has it been retransmitted tv_sec times? -
3634 * we store the retran count there.
3636 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3637 /* Yes, so drop it */
3638 if (tp1->data != NULL) {
3639 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3640 SCTP_SO_NOT_LOCKED);
3642 /* Make sure to flag we had a FR */
3643 if (tp1->whoTo != NULL) {
3644 tp1->whoTo->net_ack++;
3650 * SCTP_PRINTF("OK, we are now ready to FR this
3653 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3654 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3658 /* This is a subsequent FR */
3659 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3661 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3662 if (asoc->sctp_cmt_on_off > 0) {
3664 * CMT: Using RTX_SSTHRESH policy for CMT.
3665 * If CMT is being used, then pick dest with
3666 * largest ssthresh for any retransmission.
3668 tp1->no_fr_allowed = 1;
3670 /* sa_ignore NO_NULL_CHK */
3671 if (asoc->sctp_cmt_pf > 0) {
3673 * JRS 5/18/07 - If CMT PF is on,
3674 * use the PF version of
3677 alt = sctp_find_alternate_net(stcb, alt, 2);
3680 * JRS 5/18/07 - If only CMT is on,
3681 * use the CMT version of
3684 /* sa_ignore NO_NULL_CHK */
3685 alt = sctp_find_alternate_net(stcb, alt, 1);
3691 * CUCv2: If a different dest is picked for
3692 * the retransmission, then new
3693 * (rtx-)pseudo_cumack needs to be tracked
3694 * for orig dest. Let CUCv2 track new (rtx-)
3695 * pseudo-cumack always.
3698 tp1->whoTo->find_pseudo_cumack = 1;
3699 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3702 } else { /* CMT is OFF */
3704 #ifdef SCTP_FR_TO_ALTERNATE
3705 /* Can we find an alternate? */
3706 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3709 * default behavior is to NOT retransmit
3710 * FR's to an alternate. Armando Caro's
3711 * paper details why.
3717 tp1->rec.data.doing_fast_retransmit = 1;
3719 /* mark the sending seq for possible subsequent FR's */
3721 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3722 * (uint32_t)tpi->rec.data.tsn);
3724 if (TAILQ_EMPTY(&asoc->send_queue)) {
3726 * If the queue of send is empty then its
3727 * the next sequence number that will be
3728 * assigned so we subtract one from this to
3729 * get the one we last sent.
3731 tp1->rec.data.fast_retran_tsn = sending_seq;
3734 * If there are chunks on the send queue
3735 * (unsent data that has made it from the
3736 * stream queues but not out the door, we
3737 * take the first one (which will have the
3738 * lowest TSN) and subtract one to get the
3741 struct sctp_tmit_chunk *ttt;
3743 ttt = TAILQ_FIRST(&asoc->send_queue);
3744 tp1->rec.data.fast_retran_tsn =
3750 * this guy had a RTO calculation pending on
3753 if ((tp1->whoTo != NULL) &&
3754 (tp1->whoTo->rto_needed == 0)) {
3755 tp1->whoTo->rto_needed = 1;
3759 if (alt != tp1->whoTo) {
3760 /* yes, there is an alternate. */
3761 sctp_free_remote_addr(tp1->whoTo);
3762 /* sa_ignore FREED_MEMORY */
3764 atomic_add_int(&alt->ref_count, 1);
/*
 * sctp_try_advance_peer_ack_point:
 * Walk the association's sent queue and try to move the PR-SCTP
 * "advanced peer ack point" forward past chunks whose sent state is
 * SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED.  Chunks still marked
 * SCTP_DATAGRAM_RESEND under the PR-SCTP TTL policy whose lifetime has
 * expired are released here as well.  Returns a chunk pointer (a_adv)
 * the caller uses to decide whether a FORWARD-TSN must be sent.
 *
 * NOTE(review): this extract of the file is missing intermediate lines;
 * the comments below describe only the statements visible here.
 */
3770 struct sctp_tmit_chunk *
3771 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3772 struct sctp_association *asoc)
3774 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to advance if the peer does not support PR-SCTP. */
3778 if (asoc->prsctp_supported == 0) {
3781 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3782 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3783 tp1->sent != SCTP_DATAGRAM_RESEND &&
3784 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3785 /* no chance to advance, out of here */
3788 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3789 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3790 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3791 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3792 asoc->advanced_peer_ack_point,
3793 tp1->rec.data.tsn, 0, 0);
/* Reliable (non-PR) chunks block any further advancement. */
3796 if (!PR_SCTP_ENABLED(tp1->flags)) {
3798 * We can't fwd-tsn past any that are reliable aka
3799 * retransmitted until the asoc fails.
3804 (void)SCTP_GETTIME_TIMEVAL(&now);
3808 * now we got a chunk which is marked for another
3809 * retransmission to a PR-stream but has run out its chances
3810 * already maybe OR has been marked to skip now. Can we skip
3811 * it if its a resend?
3813 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3814 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3816 * Now is this one marked for resend and its time is
/* TTL policy: lifetime expired -> release the chunk. */
3819 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3820 /* Yes so drop it */
3822 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3823 1, SCTP_SO_NOT_LOCKED);
3827 * No, we are done when hit one for resend
3828 * whos time as not expired.
3834 * Ok now if this chunk is marked to drop it we can clean up
3835 * the chunk, advance our peer ack point and we can check
3838 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3839 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3840 /* advance PeerAckPoint goes forward */
3841 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3842 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3844 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3845 /* No update but we do save the chk */
3850 * If it is still in RESEND we can advance no
/*
 * sctp_fs_audit:
 * Debug audit of the flight-size bookkeeping.  Tallies the sent-queue
 * chunks by their 'sent' state (in flight / marked for resend /
 * in-between / acked / above-acked) and reports a mismatch against the
 * association's cached total_flight counters.  The visible panic()
 * suggests a fatal path on inconsistency — presumably under a debug
 * build option; confirm against the full file.
 *
 * NOTE(review): this extract is missing intermediate lines (including
 * the return statements); comments cover only the visible statements.
 */
3860 sctp_fs_audit(struct sctp_association *asoc)
3862 struct sctp_tmit_chunk *chk;
3863 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
/* Snapshot the counters on entry for the mismatch report below. */
3866 int entry_flight, entry_cnt;
3871 entry_flight = asoc->total_flight;
3872 entry_cnt = asoc->total_flight_count;
3874 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Classify every chunk on the sent queue by its 'sent' state. */
3877 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3878 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3879 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3884 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3886 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3888 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Any chunk still counted as in flight here is an inconsistency. */
3895 if ((inflight > 0) || (inbetween > 0)) {
3897 panic("Flight size-express incorrect? \n");
3899 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3900 entry_flight, entry_cnt);
3902 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3903 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery:
 * Recover a chunk that was sent as a zero-window probe once the peer's
 * receive window has reopened.  Clears the chunk's window_probe flag;
 * if the chunk has already been acked (or its data mbuf is gone) it is
 * left alone (only logged).  Otherwise the flight-size accounting is
 * shrunk and the chunk is marked SCTP_DATAGRAM_RESEND so it goes back
 * through the retransmission path.
 *
 * NOTE(review): this extract is missing intermediate lines; comments
 * describe only the statements visible here.
 */
3912 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3913 struct sctp_association *asoc,
3914 struct sctp_tmit_chunk *tp1)
3916 tp1->window_probe = 0;
3917 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3918 /* TSN's skipped we do NOT move back. */
3919 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3920 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3922 (uint32_t)(uintptr_t)tp1->whoTo,
3926 /* First setup this by shrinking flight */
/* Give the CC module a chance to account the acked TSN, if hooked. */
3927 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3928 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3931 sctp_flight_size_decrease(tp1);
3932 sctp_total_flight_decrease(stcb, tp1);
3933 /* Now mark for resend */
3934 tp1->sent = SCTP_DATAGRAM_RESEND;
3935 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3938 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3939 tp1->whoTo->flight_size,
3941 (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * sctp_express_handle_sack:
 * Fast-path handler for a SACK carrying only a cumulative ack (no
 * gap-ack blocks).  Visible responsibilities, in order:
 *   - log arrival; assert the TCB lock is held;
 *   - reject an old cumack; treat an unchanged cumack as a pure window
 *     update (recompute peers_rwnd with SWS avoidance);
 *   - reset per-net CC / CMT-CUC state before processing;
 *   - abort the association (protocol violation) if cumack acks a TSN
 *     that was never sent;
 *   - free every sent-queue chunk covered by the new cumack, updating
 *     flight size, net_ack/net_ack2, RTO samples, CUCv2 pseudo-cumack
 *     flags, stream counts and stream-reset triggers; wake the sender;
 *   - clear error counts / PF state and restore RTOs on nets that had
 *     unambiguous acks, then invoke the CC module's after-sack hook;
 *   - recompute peers_rwnd, recover window-probe chunks, and
 *     start/stop T3 send timers per net;
 *   - audit and rebuild flight-size counters when they look stale;
 *   - drive the SHUTDOWN / SHUTDOWN-ACK state transitions once both
 *     queues are empty;
 *   - run PR-SCTP advancement and emit FORWARD-TSN as needed.
 *
 * NOTE(review): this extract is missing intermediate lines of the file;
 * the comments below describe only the statements visible here.
 */
3947 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3948 uint32_t rwnd, int *abort_now, int ecne_seen)
3950 struct sctp_nets *net;
3951 struct sctp_association *asoc;
3952 struct sctp_tmit_chunk *tp1, *tp2;
3954 int win_probe_recovery = 0;
3955 int win_probe_recovered = 0;
3956 int j, done_once = 0;
3960 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3961 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3962 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3964 SCTP_TCB_LOCK_ASSERT(stcb);
3965 #ifdef SCTP_ASOCLOG_OF_TSNS
3966 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3967 stcb->asoc.cumack_log_at++;
3968 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3969 stcb->asoc.cumack_log_at = 0;
3973 old_rwnd = asoc->peers_rwnd;
/* Old (regressing) cumack: nothing to do.  Equal cumack: pure rwnd update. */
3974 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3977 } else if (asoc->last_acked_seq == cumack) {
3978 /* Window update sack */
3979 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3980 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3981 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3982 /* SWS sender side engages */
3983 asoc->peers_rwnd = 0;
3985 if (asoc->peers_rwnd > old_rwnd) {
3991 /* First setup for CC stuff */
3992 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3993 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3994 /* Drag along the window_tsn for cwr's */
3995 net->cwr_window_tsn = cumack;
3997 net->prev_cwnd = net->cwnd;
4002 * CMT: Reset CUC and Fast recovery algo variables before
4005 net->new_pseudo_cumack = 0;
4006 net->will_exit_fast_recovery = 0;
4007 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4008 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* send_s = one past the highest TSN we have actually sent. */
4011 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4012 tp1 = TAILQ_LAST(&asoc->sent_queue,
4013 sctpchunk_listhead);
4014 send_s = tp1->rec.data.tsn + 1;
4016 send_s = asoc->sending_seq;
/* Peer acked something we never sent: protocol violation -> abort. */
4018 if (SCTP_TSN_GE(cumack, send_s)) {
4019 struct mbuf *op_err;
4020 char msg[SCTP_DIAG_INFO_LEN];
4024 SCTP_SNPRINTF(msg, sizeof(msg),
4025 "Cum ack %8.8x greater or equal than TSN %8.8x",
4027 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4028 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4029 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4032 asoc->this_sack_highest_gap = cumack;
4033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4034 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4035 stcb->asoc.overall_error_count,
4037 SCTP_FROM_SCTP_INDATA,
4040 stcb->asoc.overall_error_count = 0;
4041 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4042 /* process the new consecutive TSN first */
4043 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4044 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4045 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4046 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4048 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4050 * If it is less than ACKED, it is
4051 * now no-longer in flight. Higher
4052 * values may occur during marking
4054 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4055 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4056 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4057 tp1->whoTo->flight_size,
4059 (uint32_t)(uintptr_t)tp1->whoTo,
4062 sctp_flight_size_decrease(tp1);
4063 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4064 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4067 /* sa_ignore NO_NULL_CHK */
4068 sctp_total_flight_decrease(stcb, tp1);
4070 tp1->whoTo->net_ack += tp1->send_size;
/* snd_count < 2: never retransmitted, so the ack is unambiguous
 * (Karn) and usable for an RTO measurement. */
4071 if (tp1->snd_count < 2) {
4073 * True non-retransmitted
4076 tp1->whoTo->net_ack2 +=
4079 /* update RTO too? */
4082 sctp_calculate_rto(stcb,
4085 &tp1->sent_rcv_time,
4086 SCTP_RTT_FROM_DATA)) {
4089 if (tp1->whoTo->rto_needed == 0) {
4090 tp1->whoTo->rto_needed = 1;
4096 * CMT: CUCv2 algorithm. From the
4097 * cumack'd TSNs, for each TSN being
4098 * acked for the first time, set the
4099 * following variables for the
4100 * corresp destination.
4101 * new_pseudo_cumack will trigger a
4103 * find_(rtx_)pseudo_cumack will
4104 * trigger search for the next
4105 * expected (rtx-)pseudo-cumack.
4107 tp1->whoTo->new_pseudo_cumack = 1;
4108 tp1->whoTo->find_pseudo_cumack = 1;
4109 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4111 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4112 /* sa_ignore NO_NULL_CHK */
4113 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4116 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4117 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4119 if (tp1->rec.data.chunk_was_revoked) {
4120 /* deflate the cwnd */
4121 tp1->whoTo->cwnd -= tp1->book_size;
4122 tp1->rec.data.chunk_was_revoked = 0;
/* Maintain per-stream chunk counts and fire a pending stream
 * reset once this stream's queues drain. */
4124 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4125 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4126 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4129 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4133 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4134 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4135 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4136 asoc->trigger_reset = 1;
/* Chunk fully acked: unlink and free it. */
4138 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4140 /* sa_ignore NO_NULL_CHK */
4141 sctp_free_bufspace(stcb, asoc, tp1, 1);
4142 sctp_m_freem(tp1->data);
4145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4146 sctp_log_sack(asoc->last_acked_seq,
4151 SCTP_LOG_FREE_SENT);
4153 asoc->sent_queue_cnt--;
4154 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Space was freed in the send buffer: wake any blocked writer. */
4161 /* sa_ignore NO_NULL_CHK */
4162 if (stcb->sctp_socket) {
4163 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4164 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4165 /* sa_ignore NO_NULL_CHK */
4166 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4168 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4170 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4171 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4175 /* JRS - Use the congestion control given in the CC module */
4176 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4177 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4178 if (net->net_ack2 > 0) {
4180 * Karn's rule applies to clearing error
4181 * count, this is optional.
4183 net->error_count = 0;
4184 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4185 /* addr came good */
4186 net->dest_state |= SCTP_ADDR_REACHABLE;
4187 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4188 0, (void *)net, SCTP_SO_NOT_LOCKED);
4190 if (net == stcb->asoc.primary_destination) {
4191 if (stcb->asoc.alternate) {
4193 * release the alternate,
4196 sctp_free_remote_addr(stcb->asoc.alternate);
4197 stcb->asoc.alternate = NULL;
/* Net leaves the potentially-failed state: restart heartbeats
 * and let the CC module adjust cwnd. */
4200 if (net->dest_state & SCTP_ADDR_PF) {
4201 net->dest_state &= ~SCTP_ADDR_PF;
4202 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4203 stcb->sctp_ep, stcb, net,
4204 SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4205 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4206 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4207 /* Done with this net */
4210 /* restore any doubled timers */
4211 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4212 if (net->RTO < stcb->asoc.minrto) {
4213 net->RTO = stcb->asoc.minrto;
4215 if (net->RTO > stcb->asoc.maxrto) {
4216 net->RTO = stcb->asoc.maxrto;
4220 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4222 asoc->last_acked_seq = cumack;
4224 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4225 /* nothing left in-flight */
4226 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4227 net->flight_size = 0;
4228 net->partial_bytes_acked = 0;
4230 asoc->total_flight = 0;
4231 asoc->total_flight_count = 0;
/* Recompute the peer rwnd from the advertised value minus flight
 * (plus per-chunk overhead), with SWS-avoidance clamping. */
4235 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4236 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4237 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4238 /* SWS sender side engages */
4239 asoc->peers_rwnd = 0;
4241 if (asoc->peers_rwnd > old_rwnd) {
4242 win_probe_recovery = 1;
4244 /* Now assure a timer where data is queued at */
4247 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4248 if (win_probe_recovery && (net->window_probe)) {
4249 win_probe_recovered = 1;
4251 * Find first chunk that was used with window probe
4252 * and clear the sent
4254 /* sa_ignore FREED_MEMORY */
4255 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4256 if (tp1->window_probe) {
4257 /* move back to data send queue */
4258 sctp_window_probe_recovery(stcb, asoc, tp1);
4263 if (net->flight_size) {
4265 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4266 if (net->window_probe) {
4267 net->window_probe = 0;
4270 if (net->window_probe) {
4272 * In window probes we must assure a timer
4273 * is still running there
4275 net->window_probe = 0;
4276 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4277 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4279 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4280 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4282 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
/* Flight counters claim data outstanding but the evidence above
 * disagrees: audit and, if confirmed, rebuild them from the queue. */
4287 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4288 (asoc->sent_queue_retran_cnt == 0) &&
4289 (win_probe_recovered == 0) &&
4292 * huh, this should not happen unless all packets are
4293 * PR-SCTP and marked to skip of course.
4295 if (sctp_fs_audit(asoc)) {
4296 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4297 net->flight_size = 0;
4299 asoc->total_flight = 0;
4300 asoc->total_flight_count = 0;
4301 asoc->sent_queue_retran_cnt = 0;
4302 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4303 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4304 sctp_flight_size_increase(tp1);
4305 sctp_total_flight_increase(stcb, tp1);
4306 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4307 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4314 /**********************************/
4315 /* Now what about shutdown issues */
4316 /**********************************/
4317 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4318 /* nothing left on sendqueue.. consider done */
4320 if ((asoc->stream_queue_cnt == 1) &&
4321 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4322 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4323 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4324 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
/* A partially-sent user message blocks a clean shutdown: abort. */
4326 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4327 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4328 (asoc->stream_queue_cnt == 1) &&
4329 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4330 struct mbuf *op_err;
4334 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4335 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4336 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* All data delivered and shutdown pending: send SHUTDOWN. */
4339 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4340 (asoc->stream_queue_cnt == 0)) {
4341 struct sctp_nets *netp;
4343 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4344 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4345 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4347 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4348 sctp_stop_timers_for_shutdown(stcb);
4349 if (asoc->alternate) {
4350 netp = asoc->alternate;
4352 netp = asoc->primary_destination;
4354 sctp_send_shutdown(stcb, netp);
4355 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4356 stcb->sctp_ep, stcb, netp);
4357 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4358 stcb->sctp_ep, stcb, NULL);
/* Peer initiated shutdown and we have drained: send SHUTDOWN-ACK. */
4359 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4360 (asoc->stream_queue_cnt == 0)) {
4361 struct sctp_nets *netp;
4363 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4364 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4365 sctp_stop_timers_for_shutdown(stcb);
4366 if (asoc->alternate) {
4367 netp = asoc->alternate;
4369 netp = asoc->primary_destination;
4371 sctp_send_shutdown_ack(stcb, netp);
4372 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4373 stcb->sctp_ep, stcb, netp);
4376 /*********************************************/
4377 /* Here we perform PR-SCTP procedures */
4379 /*********************************************/
4380 /* C1. update advancedPeerAckPoint */
4381 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4382 asoc->advanced_peer_ack_point = cumack;
4384 /* PR-Sctp issues need to be addressed too */
4385 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4386 struct sctp_tmit_chunk *lchk;
4387 uint32_t old_adv_peer_ack_point;
4389 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4390 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4391 /* C3. See if we need to send a Fwd-TSN */
4392 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4394 * ISSUE with ECN, see FWD-TSN processing.
4396 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4397 send_forward_tsn(stcb, asoc);
4399 /* try to FR fwd-tsn's that get lost too */
4400 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4401 send_forward_tsn(stcb, asoc);
4405 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4406 if (lchk->whoTo != NULL) {
4411 /* Assure a timer is up */
4412 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4413 stcb->sctp_ep, stcb, lchk->whoTo);
4416 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4417 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4419 stcb->asoc.peers_rwnd,
4420 stcb->asoc.total_flight,
4421 stcb->asoc.total_output_queue_size)
4426 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4427 struct sctp_tcb *stcb,
4428 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4429 int *abort_now, uint8_t flags,
4430 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4432 struct sctp_association *asoc;
4433 struct sctp_tmit_chunk *tp1, *tp2;
4434 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4435 uint16_t wake_him = 0;
4436 uint32_t send_s = 0;
4438 int accum_moved = 0;
4439 int will_exit_fast_recovery = 0;
4440 uint32_t a_rwnd, old_rwnd;
4441 int win_probe_recovery = 0;
4442 int win_probe_recovered = 0;
4443 struct sctp_nets *net = NULL;
4446 uint8_t reneged_all = 0;
4447 uint8_t cmt_dac_flag;
4450 * we take any chance we can to service our queues since we cannot
4451 * get awoken when the socket is read from :<
4454 * Now perform the actual SACK handling: 1) Verify that it is not an
4455 * old sack, if so discard. 2) If there is nothing left in the send
4456 * queue (cum-ack is equal to last acked) then you have a duplicate
4457 * too, update any rwnd change and verify no timers are running.
4458 * then return. 3) Process any new consequtive data i.e. cum-ack
4459 * moved process these first and note that it moved. 4) Process any
4460 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4461 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4462 * sync up flightsizes and things, stop all timers and also check
4463 * for shutdown_pending state. If so then go ahead and send off the
4464 * shutdown. If in shutdown recv, send off the shutdown-ack and
4465 * start that timer, Ret. 9) Strike any non-acked things and do FR
4466 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4467 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4468 * if in shutdown_recv state.
4470 SCTP_TCB_LOCK_ASSERT(stcb);
4472 this_sack_lowest_newack = 0;
4473 SCTP_STAT_INCR(sctps_slowpath_sack);
4475 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4476 #ifdef SCTP_ASOCLOG_OF_TSNS
4477 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4478 stcb->asoc.cumack_log_at++;
4479 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4480 stcb->asoc.cumack_log_at = 0;
4485 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4486 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4487 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4490 old_rwnd = stcb->asoc.peers_rwnd;
4491 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4492 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4493 stcb->asoc.overall_error_count,
4495 SCTP_FROM_SCTP_INDATA,
4498 stcb->asoc.overall_error_count = 0;
4500 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4501 sctp_log_sack(asoc->last_acked_seq,
4508 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4510 uint32_t *dupdata, dblock;
4512 for (i = 0; i < num_dup; i++) {
4513 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4514 sizeof(uint32_t), (uint8_t *)&dblock);
4515 if (dupdata == NULL) {
4518 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4522 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4523 tp1 = TAILQ_LAST(&asoc->sent_queue,
4524 sctpchunk_listhead);
4525 send_s = tp1->rec.data.tsn + 1;
4528 send_s = asoc->sending_seq;
4530 if (SCTP_TSN_GE(cum_ack, send_s)) {
4531 struct mbuf *op_err;
4532 char msg[SCTP_DIAG_INFO_LEN];
4535 * no way, we have not even sent this TSN out yet. Peer is
4536 * hopelessly messed up with us.
4538 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4541 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4542 tp1->rec.data.tsn, (void *)tp1);
4547 SCTP_SNPRINTF(msg, sizeof(msg),
4548 "Cum ack %8.8x greater or equal than TSN %8.8x",
4550 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4551 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4552 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4555 /**********************/
4556 /* 1) check the range */
4557 /**********************/
4558 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4559 /* acking something behind */
4563 /* update the Rwnd of the peer */
4564 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4565 TAILQ_EMPTY(&asoc->send_queue) &&
4566 (asoc->stream_queue_cnt == 0)) {
4567 /* nothing left on send/sent and strmq */
4568 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4569 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4570 asoc->peers_rwnd, 0, 0, a_rwnd);
4572 asoc->peers_rwnd = a_rwnd;
4573 if (asoc->sent_queue_retran_cnt) {
4574 asoc->sent_queue_retran_cnt = 0;
4576 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4577 /* SWS sender side engages */
4578 asoc->peers_rwnd = 0;
4580 /* stop any timers */
4581 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4582 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4583 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4584 net->partial_bytes_acked = 0;
4585 net->flight_size = 0;
4587 asoc->total_flight = 0;
4588 asoc->total_flight_count = 0;
4592 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4593 * things. The total byte count acked is tracked in netAckSz AND
4594 * netAck2 is used to track the total bytes acked that are un-
4595 * amibguious and were never retransmitted. We track these on a per
4596 * destination address basis.
4598 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4599 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4600 /* Drag along the window_tsn for cwr's */
4601 net->cwr_window_tsn = cum_ack;
4603 net->prev_cwnd = net->cwnd;
4608 * CMT: Reset CUC and Fast recovery algo variables before
4611 net->new_pseudo_cumack = 0;
4612 net->will_exit_fast_recovery = 0;
4613 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4614 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4618 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4619 * to be greater than the cumack. Also reset saw_newack to 0
4622 net->saw_newack = 0;
4623 net->this_sack_highest_newack = last_tsn;
4625 /* process the new consecutive TSN first */
4626 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4627 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4628 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4630 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4632 * If it is less than ACKED, it is
4633 * now no-longer in flight. Higher
4634 * values may occur during marking
4636 if ((tp1->whoTo->dest_state &
4637 SCTP_ADDR_UNCONFIRMED) &&
4638 (tp1->snd_count < 2)) {
4640 * If there was no retran
4641 * and the address is
4642 * un-confirmed and we sent
4644 * sacked.. its confirmed,
4647 tp1->whoTo->dest_state &=
4648 ~SCTP_ADDR_UNCONFIRMED;
4650 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4651 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4652 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4653 tp1->whoTo->flight_size,
4655 (uint32_t)(uintptr_t)tp1->whoTo,
4658 sctp_flight_size_decrease(tp1);
4659 sctp_total_flight_decrease(stcb, tp1);
4660 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4661 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4665 tp1->whoTo->net_ack += tp1->send_size;
4667 /* CMT SFR and DAC algos */
4668 this_sack_lowest_newack = tp1->rec.data.tsn;
4669 tp1->whoTo->saw_newack = 1;
4671 if (tp1->snd_count < 2) {
4673 * True non-retransmitted
4676 tp1->whoTo->net_ack2 +=
4679 /* update RTO too? */
4682 sctp_calculate_rto(stcb,
4685 &tp1->sent_rcv_time,
4686 SCTP_RTT_FROM_DATA)) {
4689 if (tp1->whoTo->rto_needed == 0) {
4690 tp1->whoTo->rto_needed = 1;
4696 * CMT: CUCv2 algorithm. From the
4697 * cumack'd TSNs, for each TSN being
4698 * acked for the first time, set the
4699 * following variables for the
4700 * corresp destination.
4701 * new_pseudo_cumack will trigger a
4703 * find_(rtx_)pseudo_cumack will
4704 * trigger search for the next
4705 * expected (rtx-)pseudo-cumack.
4707 tp1->whoTo->new_pseudo_cumack = 1;
4708 tp1->whoTo->find_pseudo_cumack = 1;
4709 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4713 sctp_log_sack(asoc->last_acked_seq,
4718 SCTP_LOG_TSN_ACKED);
4720 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4721 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4724 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4725 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4726 #ifdef SCTP_AUDITING_ENABLED
4727 sctp_audit_log(0xB3,
4728 (asoc->sent_queue_retran_cnt & 0x000000ff));
4731 if (tp1->rec.data.chunk_was_revoked) {
4732 /* deflate the cwnd */
4733 tp1->whoTo->cwnd -= tp1->book_size;
4734 tp1->rec.data.chunk_was_revoked = 0;
4736 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4737 tp1->sent = SCTP_DATAGRAM_ACKED;
4744 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4745 /* always set this up to cum-ack */
4746 asoc->this_sack_highest_gap = last_tsn;
4748 if ((num_seg > 0) || (num_nr_seg > 0)) {
4751 * thisSackHighestGap will increase while handling NEW
4752 * segments this_sack_highest_newack will increase while
4753 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4754 * used for CMT DAC algo. saw_newack will also change.
4756 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4757 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4758 num_seg, num_nr_seg, &rto_ok)) {
4762 * validate the biggest_tsn_acked in the gap acks if strict
4763 * adherence is wanted.
4765 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4767 * peer is either confused or we are under attack.
4770 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4771 biggest_tsn_acked, send_s);
4775 /*******************************************/
4776 /* cancel ALL T3-send timer if accum moved */
4777 /*******************************************/
4778 if (asoc->sctp_cmt_on_off > 0) {
4779 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4780 if (net->new_pseudo_cumack)
4781 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4783 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4788 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4789 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4790 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4794 /********************************************/
4795 /* drop the acked chunks from the sentqueue */
4796 /********************************************/
4797 asoc->last_acked_seq = cum_ack;
4799 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4800 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4803 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4804 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4805 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4808 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4812 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4813 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4814 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4815 asoc->trigger_reset = 1;
4817 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4818 if (PR_SCTP_ENABLED(tp1->flags)) {
4819 if (asoc->pr_sctp_cnt != 0)
4820 asoc->pr_sctp_cnt--;
4822 asoc->sent_queue_cnt--;
4824 /* sa_ignore NO_NULL_CHK */
4825 sctp_free_bufspace(stcb, asoc, tp1, 1);
4826 sctp_m_freem(tp1->data);
4828 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4829 asoc->sent_queue_cnt_removeable--;
4832 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4833 sctp_log_sack(asoc->last_acked_seq,
4838 SCTP_LOG_FREE_SENT);
4840 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4843 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4845 panic("Warning flight size is positive and should be 0");
4847 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4848 asoc->total_flight);
4850 asoc->total_flight = 0;
4853 /* sa_ignore NO_NULL_CHK */
4854 if ((wake_him) && (stcb->sctp_socket)) {
4855 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4856 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4857 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4859 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4861 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4862 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4866 if (asoc->fast_retran_loss_recovery && accum_moved) {
4867 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4868 /* Setup so we will exit RFC2582 fast recovery */
4869 will_exit_fast_recovery = 1;
4873 * Check for revoked fragments:
4875 * if Previous sack - Had no frags then we can't have any revoked if
4876 * Previous sack - Had frag's then - If we now have frags aka
4877 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4878 * some of them. else - The peer revoked all ACKED fragments, since
4879 * we had some before and now we have NONE.
4883 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4884 asoc->saw_sack_with_frags = 1;
4885 } else if (asoc->saw_sack_with_frags) {
4886 int cnt_revoked = 0;
4888 /* Peer revoked all dg's marked or acked */
4889 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4890 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4891 tp1->sent = SCTP_DATAGRAM_SENT;
4892 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4893 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4894 tp1->whoTo->flight_size,
4896 (uint32_t)(uintptr_t)tp1->whoTo,
4899 sctp_flight_size_increase(tp1);
4900 sctp_total_flight_increase(stcb, tp1);
4901 tp1->rec.data.chunk_was_revoked = 1;
4903 * To ensure that this increase in
4904 * flightsize, which is artificial, does not
4905 * throttle the sender, we also increase the
4906 * cwnd artificially.
4908 tp1->whoTo->cwnd += tp1->book_size;
4915 asoc->saw_sack_with_frags = 0;
4918 asoc->saw_sack_with_nr_frags = 1;
4920 asoc->saw_sack_with_nr_frags = 0;
4922 /* JRS - Use the congestion control given in the CC module */
4923 if (ecne_seen == 0) {
4924 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4925 if (net->net_ack2 > 0) {
4927 * Karn's rule applies to clearing error
4928 * count, this is optional.
4930 net->error_count = 0;
4931 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4932 /* addr came good */
4933 net->dest_state |= SCTP_ADDR_REACHABLE;
4934 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4935 0, (void *)net, SCTP_SO_NOT_LOCKED);
4938 if (net == stcb->asoc.primary_destination) {
4939 if (stcb->asoc.alternate) {
4941 * release the alternate,
4944 sctp_free_remote_addr(stcb->asoc.alternate);
4945 stcb->asoc.alternate = NULL;
4949 if (net->dest_state & SCTP_ADDR_PF) {
4950 net->dest_state &= ~SCTP_ADDR_PF;
4951 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4952 stcb->sctp_ep, stcb, net,
4953 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4954 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4955 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4956 /* Done with this net */
4959 /* restore any doubled timers */
4960 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4961 if (net->RTO < stcb->asoc.minrto) {
4962 net->RTO = stcb->asoc.minrto;
4964 if (net->RTO > stcb->asoc.maxrto) {
4965 net->RTO = stcb->asoc.maxrto;
4969 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4972 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4973 /* nothing left in-flight */
4974 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4975 /* stop all timers */
4976 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4978 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4979 net->flight_size = 0;
4980 net->partial_bytes_acked = 0;
4982 asoc->total_flight = 0;
4983 asoc->total_flight_count = 0;
4986 /**********************************/
4987 /* Now what about shutdown issues */
4988 /**********************************/
4989 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4990 /* nothing left on sendqueue.. consider done */
4991 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4992 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4993 asoc->peers_rwnd, 0, 0, a_rwnd);
4995 asoc->peers_rwnd = a_rwnd;
4996 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4997 /* SWS sender side engages */
4998 asoc->peers_rwnd = 0;
5001 if ((asoc->stream_queue_cnt == 1) &&
5002 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5003 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5004 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5005 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5007 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5008 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5009 (asoc->stream_queue_cnt == 1) &&
5010 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5011 struct mbuf *op_err;
5015 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5016 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5017 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5020 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5021 (asoc->stream_queue_cnt == 0)) {
5022 struct sctp_nets *netp;
5024 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5025 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5026 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5028 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5029 sctp_stop_timers_for_shutdown(stcb);
5030 if (asoc->alternate) {
5031 netp = asoc->alternate;
5033 netp = asoc->primary_destination;
5035 sctp_send_shutdown(stcb, netp);
5036 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5037 stcb->sctp_ep, stcb, netp);
5038 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5039 stcb->sctp_ep, stcb, NULL);
5041 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5042 (asoc->stream_queue_cnt == 0)) {
5043 struct sctp_nets *netp;
5045 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5046 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5047 sctp_stop_timers_for_shutdown(stcb);
5048 if (asoc->alternate) {
5049 netp = asoc->alternate;
5051 netp = asoc->primary_destination;
5053 sctp_send_shutdown_ack(stcb, netp);
5054 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5055 stcb->sctp_ep, stcb, netp);
5060 * Now here we are going to recycle net_ack for a different use...
5063 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5068 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5069 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5070 * automatically ensure that.
5072 if ((asoc->sctp_cmt_on_off > 0) &&
5073 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5074 (cmt_dac_flag == 0)) {
5075 this_sack_lowest_newack = cum_ack;
5077 if ((num_seg > 0) || (num_nr_seg > 0)) {
5078 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5079 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5081 /* JRS - Use the congestion control given in the CC module */
5082 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5084 /* Now are we exiting loss recovery ? */
5085 if (will_exit_fast_recovery) {
5086 /* Ok, we must exit fast recovery */
5087 asoc->fast_retran_loss_recovery = 0;
5089 if ((asoc->sat_t3_loss_recovery) &&
5090 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5091 /* end satellite t3 loss recovery */
5092 asoc->sat_t3_loss_recovery = 0;
5097 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5098 if (net->will_exit_fast_recovery) {
5099 /* Ok, we must exit fast recovery */
5100 net->fast_retran_loss_recovery = 0;
5104 /* Adjust and set the new rwnd value */
5105 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5106 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5107 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5109 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5110 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5111 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5112 /* SWS sender side engages */
5113 asoc->peers_rwnd = 0;
5115 if (asoc->peers_rwnd > old_rwnd) {
5116 win_probe_recovery = 1;
5120 * Now we must setup so we have a timer up for anyone with
5126 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5127 if (win_probe_recovery && (net->window_probe)) {
5128 win_probe_recovered = 1;
5130 * Find first chunk that was used with
5131 * window probe and clear the event. Put
5132 * it back into the send queue as if has
5135 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5136 if (tp1->window_probe) {
5137 sctp_window_probe_recovery(stcb, asoc, tp1);
5142 if (net->flight_size) {
5144 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5145 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5146 stcb->sctp_ep, stcb, net);
5148 if (net->window_probe) {
5149 net->window_probe = 0;
5152 if (net->window_probe) {
5154 * In window probes we must assure a timer
5155 * is still running there
5157 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5158 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5159 stcb->sctp_ep, stcb, net);
5162 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5163 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5165 SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5170 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5171 (asoc->sent_queue_retran_cnt == 0) &&
5172 (win_probe_recovered == 0) &&
5175 * huh, this should not happen unless all packets are
5176 * PR-SCTP and marked to skip of course.
5178 if (sctp_fs_audit(asoc)) {
5179 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5180 net->flight_size = 0;
5182 asoc->total_flight = 0;
5183 asoc->total_flight_count = 0;
5184 asoc->sent_queue_retran_cnt = 0;
5185 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5186 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5187 sctp_flight_size_increase(tp1);
5188 sctp_total_flight_increase(stcb, tp1);
5189 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5190 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5197 /*********************************************/
5198 /* Here we perform PR-SCTP procedures */
5200 /*********************************************/
5201 /* C1. update advancedPeerAckPoint */
5202 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5203 asoc->advanced_peer_ack_point = cum_ack;
5205 /* C2. try to further move advancedPeerAckPoint ahead */
5206 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5207 struct sctp_tmit_chunk *lchk;
5208 uint32_t old_adv_peer_ack_point;
5210 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5211 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5212 /* C3. See if we need to send a Fwd-TSN */
5213 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5215 * ISSUE with ECN, see FWD-TSN processing.
5217 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5218 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5219 0xee, cum_ack, asoc->advanced_peer_ack_point,
5220 old_adv_peer_ack_point);
5222 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5223 send_forward_tsn(stcb, asoc);
5225 /* try to FR fwd-tsn's that get lost too */
5226 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5227 send_forward_tsn(stcb, asoc);
5231 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5232 if (lchk->whoTo != NULL) {
5237 /* Assure a timer is up */
5238 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5239 stcb->sctp_ep, stcb, lchk->whoTo);
5242 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5243 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5245 stcb->asoc.peers_rwnd,
5246 stcb->asoc.total_flight,
5247 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a received SHUTDOWN chunk.
 * A SHUTDOWN carries only a cum-ack (no gap reports, no window update),
 * so we fabricate an a_rwnd that leaves the peer's advertised receive
 * window unchanged and hand the work to the express SACK handler.
 * NOTE(review): the a_rwnd formula presumes the express handler subtracts
 * total_flight again when recomputing peers_rwnd — confirm in
 * sctp_express_handle_sack().
 */
5252 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5255 uint32_t cum_ack, a_rwnd;
/* Cum-ack comes off the wire in network byte order. */
5257 cum_ack = ntohl(cp->cumulative_tsn_ack);
5258 /* Arrange so a_rwnd does NOT change */
5259 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5261 /* Now call the express sack handling */
/* Final 0 argument: no ECN-echo was seen with this ack. TODO confirm */
5262 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * After a FORWARD-TSN has advanced strmin->last_mid_delivered, walk the
 * stream's ordered in-queue and push out everything that is now deliverable.
 *
 * Pass 1: deliver (and un-account) every complete message whose MID is at or
 * before last_mid_delivered.  A partially reassembled message instead rewinds
 * last_mid_delivered to just before it and sets need_reasm_check so the
 * reassembly code gets a chance to complete it.
 * Pass 2: resume normal strictly-in-order delivery for the consecutive MIDs
 * that follow.
 *
 * NOTE(review): 'asoc' must be initialized to &stcb->asoc before first use;
 * caller is expected to hold the read lock (SCTP_READ_LOCK_HELD is passed
 * down) — confirm against the full source.
 */
5266 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5267 struct sctp_stream_in *strmin)
5269 struct sctp_queued_to_read *control, *ncontrol;
5270 struct sctp_association *asoc;
5272 int need_reasm_check = 0;
/* Remember the delivery point so pass 1 can compare against it. */
5275 mid = strmin->last_mid_delivered;
5277 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with MID <= last_mid_delivered is now in range. */
5280 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5281 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5282 /* this is deliverable now */
5283 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Complete (unfragmented) message: unhook it from whichever stream queue it sits on. */
5284 if (control->on_strm_q) {
5285 if (control->on_strm_q == SCTP_ON_ORDERED) {
5286 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5287 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5288 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5291 panic("strmin: %p ctl: %p unknown %d",
5292 strmin, control, control->on_strm_q);
5295 control->on_strm_q = 0;
5297 /* subtract pending on streams */
/* Clamp to zero rather than underflow the byte count on inconsistency. */
5298 if (asoc->size_on_all_streams >= control->length) {
5299 asoc->size_on_all_streams -= control->length;
5302 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5304 asoc->size_on_all_streams = 0;
5307 sctp_ucount_decr(asoc->cnt_on_all_streams);
5308 /* deliver it to at least the delivery-q */
5309 if (stcb->sctp_socket) {
5310 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5311 sctp_add_to_readq(stcb->sctp_ep, stcb,
5313 &stcb->sctp_socket->so_rcv,
5314 1, SCTP_READ_LOCK_HELD,
5315 SCTP_SO_NOT_LOCKED);
5318 /* Its a fragmented message */
5319 if (control->first_frag_seen) {
5321 * Make it so this is next to
5322 * deliver, we restore later
/* Rewind so the reassembly check below treats this message as next up. */
5324 strmin->last_mid_delivered = control->mid - 1;
5325 need_reasm_check = 1;
5330 /* no more delivery now. */
5334 if (need_reasm_check) {
/* Give reassembly a chance to complete the partial message found in pass 1. */
5337 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5338 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5339 /* Restore the next to deliver unless we are ahead */
5340 strmin->last_mid_delivered = mid;
5343 /* Left the front Partial one on */
5346 need_reasm_check = 0;
5349 * now we must deliver things in queue the normal way if any are
/* Pass 2: continue in-order delivery for strictly consecutive MIDs. */
5352 mid = strmin->last_mid_delivered + 1;
5353 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5354 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5355 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5356 /* this is deliverable now */
5357 if (control->on_strm_q) {
5358 if (control->on_strm_q == SCTP_ON_ORDERED) {
5359 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5360 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5361 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5364 panic("strmin: %p ctl: %p unknown %d",
5365 strmin, control, control->on_strm_q);
5368 control->on_strm_q = 0;
5370 /* subtract pending on streams */
5371 if (asoc->size_on_all_streams >= control->length) {
5372 asoc->size_on_all_streams -= control->length;
5375 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5377 asoc->size_on_all_streams = 0;
5380 sctp_ucount_decr(asoc->cnt_on_all_streams);
5381 /* deliver it to at least the delivery-q */
/* Unlike pass 1, delivery here advances the stream's delivery point. */
5382 strmin->last_mid_delivered = control->mid;
5383 if (stcb->sctp_socket) {
5384 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5385 sctp_add_to_readq(stcb->sctp_ep, stcb,
5387 &stcb->sctp_socket->so_rcv, 1,
5388 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5391 mid = strmin->last_mid_delivered + 1;
5393 /* Its a fragmented message */
5394 if (control->first_frag_seen) {
5396 * Make it so this is next to
5399 strmin->last_mid_delivered = control->mid - 1;
5400 need_reasm_check = 1;
5408 if (need_reasm_check) {
5409 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * A FORWARD-TSN told us the peer is skipping this message: purge the chunks
 * queued for reassembly on 'control'.  For pre-I-DATA unordered data
 * (!idata_supported && ordered == 0) only chunks with TSN <= cumtsn are
 * purged, since newer chunks may belong to a message that is not being
 * skipped.  If purgeable chunks remain, the control is re-primed and
 * redelivery is attempted; otherwise the control is unhooked from its stream
 * queue and freed unless it is already sitting on the socket read queue.
 * NOTE(review): caller appears responsible for the INP read lock
 * (SCTP_READ_LOCK_HELD is passed to the helpers) — confirm at call sites.
 */
5416 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5417 struct sctp_association *asoc, struct sctp_stream_in *strm,
5418 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5420 struct sctp_tmit_chunk *chk, *nchk;
5421 int cnt_removed = 0;
5424 * For now large messages held on the stream reasm that are complete
5425 * will be tossed too. We could in theory do more work to spin
5426 * through and stop after dumping one msg aka seeing the start of a
5427 * new msg at the head, and call the delivery function... to see if
5428 * it can be delivered... But for now we just dump everything on the
/* Old-style unordered data entirely beyond the cum-ack point: nothing to flush. */
5431 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5434 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5435 /* Purge hanging chunks */
5436 if (!asoc->idata_supported && (ordered == 0)) {
/* Keep chunks past the cum-ack point; they belong to live data. */
5437 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5442 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* Clamp to zero rather than underflow the reassembly byte count. */
5443 if (asoc->size_on_reasm_queue >= chk->send_size) {
5444 asoc->size_on_reasm_queue -= chk->send_size;
5447 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5449 asoc->size_on_reasm_queue = 0;
5452 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5454 sctp_m_freem(chk->data);
5457 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Some chunks survived the purge: reset the control and retry delivery. */
5459 if (!TAILQ_EMPTY(&control->reasm)) {
5460 /* This has to be old data, unordered */
5461 if (control->data) {
5462 sctp_m_freem(control->data);
5463 control->data = NULL;
5465 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5466 chk = TAILQ_FIRST(&control->reasm);
5467 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5468 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5469 sctp_add_chk_to_control(control, strm, stcb, asoc,
5470 chk, SCTP_READ_LOCK_HELD);
5472 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Everything purged: unhook the control from its stream queue. */
5475 if (control->on_strm_q == SCTP_ON_ORDERED) {
5476 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5477 if (asoc->size_on_all_streams >= control->length) {
5478 asoc->size_on_all_streams -= control->length;
5481 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5483 asoc->size_on_all_streams = 0;
5486 sctp_ucount_decr(asoc->cnt_on_all_streams);
5487 control->on_strm_q = 0;
5488 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5489 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5490 control->on_strm_q = 0;
5492 } else if (control->on_strm_q) {
5493 panic("strm: %p ctl: %p unknown %d",
5494 strm, control, control->on_strm_q);
5497 control->on_strm_q = 0;
/* Free the control only if the socket read queue does not still own it. */
5498 if (control->on_read_q == 0) {
5499 sctp_free_remote_addr(control->whoFrom);
5500 if (control->data) {
5501 sctp_m_freem(control->data);
5502 control->data = NULL;
5504 sctp_free_a_readq(stcb, control);
5509 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5510 struct sctp_forward_tsn_chunk *fwd,
5511 int *abort_flag, struct mbuf *m, int offset)
5513 /* The pr-sctp fwd tsn */
5515 * here we will perform all the data receiver side steps for
5516 * processing FwdTSN, as required in by pr-sctp draft:
5518 * Assume we get FwdTSN(x):
5520 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5521 * + others we have 3) examine and update re-ordering queue on
5522 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5523 * report where we are.
5525 struct sctp_association *asoc;
5526 uint32_t new_cum_tsn, gap;
5527 unsigned int i, fwd_sz, m_size;
5529 struct sctp_stream_in *strm;
5530 struct sctp_queued_to_read *control, *sv;
5533 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5534 SCTPDBG(SCTP_DEBUG_INDATA1,
5535 "Bad size too small/big fwd-tsn\n");
5538 m_size = (stcb->asoc.mapping_array_size << 3);
5539 /*************************************************************/
5540 /* 1. Here we update local cumTSN and shift the bitmap array */
5541 /*************************************************************/
5542 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5544 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5545 /* Already got there ... */
5549 * now we know the new TSN is more advanced, let's find the actual
5552 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5553 asoc->cumulative_tsn = new_cum_tsn;
5554 if (gap >= m_size) {
5555 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5556 struct mbuf *op_err;
5557 char msg[SCTP_DIAG_INFO_LEN];
5560 * out of range (of single byte chunks in the rwnd I
5561 * give out). This must be an attacker.
5564 SCTP_SNPRINTF(msg, sizeof(msg),
5565 "New cum ack %8.8x too high, highest TSN %8.8x",
5566 new_cum_tsn, asoc->highest_tsn_inside_map);
5567 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5568 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5569 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5572 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5574 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5575 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5576 asoc->highest_tsn_inside_map = new_cum_tsn;
5578 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5579 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5582 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5585 SCTP_TCB_LOCK_ASSERT(stcb);
5586 for (i = 0; i <= gap; i++) {
5587 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5588 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5589 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5590 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5591 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5596 /*************************************************************/
5597 /* 2. Clear up re-assembly queue */
5598 /*************************************************************/
5600 /* This is now done as part of clearing up the stream/seq */
5601 if (asoc->idata_supported == 0) {
5604 /* Flush all the un-ordered data based on cum-tsn */
5605 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5606 for (sid = 0; sid < asoc->streamincnt; sid++) {
5607 strm = &asoc->strmin[sid];
5608 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5609 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5612 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5614 /*******************************************************/
5615 /* 3. Update the PR-stream re-ordering queues and fix */
5616 /* delivery issues as needed. */
5617 /*******************************************************/
5618 fwd_sz -= sizeof(*fwd);
5621 unsigned int num_str;
5624 uint16_t ordered, flags;
5625 struct sctp_strseq *stseq, strseqbuf;
5626 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5628 offset += sizeof(*fwd);
5630 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5631 if (asoc->idata_supported) {
5632 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5634 num_str = fwd_sz / sizeof(struct sctp_strseq);
5636 for (i = 0; i < num_str; i++) {
5637 if (asoc->idata_supported) {
5638 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5639 sizeof(struct sctp_strseq_mid),
5640 (uint8_t *)&strseqbuf_m);
5641 offset += sizeof(struct sctp_strseq_mid);
5642 if (stseq_m == NULL) {
5645 sid = ntohs(stseq_m->sid);
5646 mid = ntohl(stseq_m->mid);
5647 flags = ntohs(stseq_m->flags);
5648 if (flags & PR_SCTP_UNORDERED_FLAG) {
5654 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5655 sizeof(struct sctp_strseq),
5656 (uint8_t *)&strseqbuf);
5657 offset += sizeof(struct sctp_strseq);
5658 if (stseq == NULL) {
5661 sid = ntohs(stseq->sid);
5662 mid = (uint32_t)ntohs(stseq->ssn);
5670 * Ok we now look for the stream/seq on the read
5671 * queue where its not all delivered. If we find it
5672 * we transmute the read entry into a PDI_ABORTED.
5674 if (sid >= asoc->streamincnt) {
5675 /* screwed up streams, stop! */
5678 if ((asoc->str_of_pdapi == sid) &&
5679 (asoc->ssn_of_pdapi == mid)) {
5681 * If this is the one we were partially
5682 * delivering now then we no longer are.
5683 * Note this will change with the reassembly
5686 asoc->fragmented_delivery_inprogress = 0;
5688 strm = &asoc->strmin[sid];
5690 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
5691 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5692 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5696 if (asoc->idata_supported) {
5697 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
5698 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5699 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5703 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5704 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5708 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5709 if ((control->sinfo_stream == sid) &&
5710 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5711 str_seq = (sid << 16) | (0x0000ffff & mid);
5712 control->pdapi_aborted = 1;
5713 sv = stcb->asoc.control_pdapi;
5714 control->end_added = 1;
5715 if (control->on_strm_q == SCTP_ON_ORDERED) {
5716 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5717 if (asoc->size_on_all_streams >= control->length) {
5718 asoc->size_on_all_streams -= control->length;
5721 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5723 asoc->size_on_all_streams = 0;
5726 sctp_ucount_decr(asoc->cnt_on_all_streams);
5727 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5728 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5730 } else if (control->on_strm_q) {
5731 panic("strm: %p ctl: %p unknown %d",
5732 strm, control, control->on_strm_q);
5735 control->on_strm_q = 0;
5736 stcb->asoc.control_pdapi = control;
5737 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5739 SCTP_PARTIAL_DELIVERY_ABORTED,
5741 SCTP_SO_NOT_LOCKED);
5742 stcb->asoc.control_pdapi = sv;
5744 } else if ((control->sinfo_stream == sid) &&
5745 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5746 /* We are past our victim SSN */
5750 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5751 /* Update the sequence number */
5752 strm->last_mid_delivered = mid;
5754 /* now kick the stream the new way */
5755 /* sa_ignore NO_NULL_CHK */
5756 sctp_kick_prsctp_reorder_queue(stcb, strm);
5758 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5761 * Now slide thing forward.
5763 sctp_slide_mapping_arrays(stcb);