2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
/*
 * Forward declaration: append a reassembly chunk's data onto an existing
 * read-queue control entry.  The definition appears later in this file.
 * (The extract had dropped the storage-class/return-type line; restored.)
 */
static void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
78 /* Calculate what the rwnd would be */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone as put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
90 if (stcb->sctp_socket == NULL) {
94 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->cnt_on_reasm_queue == 0 &&
100 asoc->cnt_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
108 * take out what has NOT been put on socket queue and we yet hold
111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 asoc->cnt_on_reasm_queue * MSIZE));
113 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 asoc->cnt_on_all_streams * MSIZE));
120 /* what is the overhead of all these rwnd's */
121 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
123 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 * even it is 0. SWS engaged
126 if (calc < stcb->asoc.my_rwnd_control_len) {
135 * Build out our readq entry based on the incoming packet.
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139 struct sctp_nets *net,
140 uint32_t tsn, uint32_t ppid,
141 uint32_t context, uint16_t sid,
142 uint32_t mid, uint8_t flags,
145 struct sctp_queued_to_read *read_queue_e = NULL;
147 sctp_alloc_a_readq(stcb, read_queue_e);
148 if (read_queue_e == NULL) {
151 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 read_queue_e->sinfo_stream = sid;
153 read_queue_e->sinfo_flags = (flags << 8);
154 read_queue_e->sinfo_ppid = ppid;
155 read_queue_e->sinfo_context = context;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->mid = mid;
160 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 TAILQ_INIT(&read_queue_e->reasm);
162 read_queue_e->whoFrom = net;
163 atomic_add_int(&net->ref_count, 1);
164 read_queue_e->data = dm;
165 read_queue_e->stcb = stcb;
166 read_queue_e->port_from = stcb->rport;
168 return (read_queue_e);
/*
 * Build the ancillary-data (cmsg) mbuf for a received message: an
 * SCTP_RCVINFO, an SCTP_NXTINFO and/or the legacy SCTP_SNDRCV/SCTP_EXTRCV
 * cmsg, depending on which socket features are enabled on the endpoint.
 *
 * NOTE(review): this extract is missing lines relative to the upstream
 * file (return type, braces, locals such as 'len', 'ret' and 'cmh', and
 * several early returns); the code below is kept byte-identical to the
 * fragment — reconcile against the repository copy before building.
 */
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
struct sctp_extrcvinfo *seinfo;
struct sctp_sndrcvinfo *outinfo;
struct sctp_rcvinfo *rcvinfo;
struct sctp_nxtinfo *nxtinfo;
/* Fast path: no ancillary data requested at all. */
if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
/* user does not want any ancillary data */
/* First pass: add up CMSG_SPACE for every cmsg we will emit. */
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
seinfo = (struct sctp_extrcvinfo *)sinfo;
/* NXTINFO only if a next message is actually known to be available. */
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
(seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/* One mbuf holds the whole cmsg chain. */
ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
SCTP_BUF_LEN(ret) = 0;
/* We need a CMSG header followed by the struct */
cmh = mtod(ret, struct cmsghdr *);
/*
 * Make sure that there is no un-initialized padding between the
 * cmsg header and cmsg data and after the cmsg data.
 */
/* Second pass: fill each requested cmsg, advancing 'cmh' as we go. */
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
cmh->cmsg_level = IPPROTO_SCTP;
cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
cmh->cmsg_type = SCTP_RCVINFO;
rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
rcvinfo->rcv_sid = sinfo->sinfo_stream;
rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
rcvinfo->rcv_flags = sinfo->sinfo_flags;
rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
rcvinfo->rcv_context = sinfo->sinfo_context;
rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
/* Step past this cmsg to the next slot. */
cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO: describe the next message waiting on the socket. */
cmh->cmsg_level = IPPROTO_SCTP;
cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
cmh->cmsg_type = SCTP_NXTINFO;
nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
nxtinfo->nxt_flags = 0;
if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
nxtinfo->nxt_flags |= SCTP_UNORDERED;
if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
nxtinfo->nxt_flags |= SCTP_COMPLETE;
nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
nxtinfo->nxt_length = seinfo->serinfo_next_length;
nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy SCTP_SNDRCV (or extended SCTP_EXTRCV) ancillary data. */
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
cmh->cmsg_level = IPPROTO_SCTP;
outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
cmh->cmsg_type = SCTP_EXTRCV;
memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
cmh->cmsg_type = SCTP_SNDRCV;
SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
289 uint32_t gap, i, cumackp1;
291 int in_r = 0, in_nr = 0;
293 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
296 cumackp1 = asoc->cumulative_tsn + 1;
297 if (SCTP_TSN_GT(cumackp1, tsn)) {
299 * this tsn is behind the cum ack and thus we don't need to
300 * worry about it being moved from one to the other.
304 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
305 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
306 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 if ((in_r == 0) && (in_nr == 0)) {
309 panic("Things are really messed up now");
311 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
312 sctp_print_mapping_array(asoc);
316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
318 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
319 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
320 asoc->highest_tsn_inside_nr_map = tsn;
322 if (tsn == asoc->highest_tsn_inside_map) {
323 /* We must back down to see what the new highest is */
324 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
325 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 asoc->highest_tsn_inside_map = i;
333 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert 'control' into the proper per-stream input queue (unordered or
 * ordered), keeping the ordered queue sorted by MID/SSN.  Returns non-zero
 * (abort indication) on a duplicate message id — presumably -1, per the
 * comment below; TODO confirm, the return statements are not visible in
 * this extract.
 *
 * NOTE(review): this extract is missing lines relative to the upstream
 * file (return type, braces, else-branches and returns); the code below
 * is kept byte-identical to the fragment.
 */
sctp_place_control_in_stream(struct sctp_stream_in *strm,
struct sctp_association *asoc,
struct sctp_queued_to_read *control)
struct sctp_queued_to_read *at;
struct sctp_readhead *q;
uint8_t flags, unordered;
/* Chunk flags live in the upper byte of sinfo_flags. */
flags = (control->sinfo_flags >> 8);
unordered = flags & SCTP_DATA_UNORDERED;
q = &strm->uno_inqueue;
if (asoc->idata_supported == 0) {
if (!TAILQ_EMPTY(q)) {
/* Only one stream can be here in old style */
TAILQ_INSERT_TAIL(q, control, next_instrm);
control->on_strm_q = SCTP_ON_UNORDERED;
/* An unfragmented message is complete on arrival. */
if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
control->end_added = 1;
control->first_frag_seen = 1;
control->last_frag_seen = 1;
if (TAILQ_EMPTY(q)) {
/* Empty queue: becomes the head. */
TAILQ_INSERT_HEAD(q, control, next_instrm);
control->on_strm_q = SCTP_ON_UNORDERED;
control->on_strm_q = SCTP_ON_ORDERED;
/* Walk the queue to keep it sorted by message id. */
TAILQ_FOREACH(at, q, next_instrm) {
if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
/*
 * one in queue is bigger than the new one,
 * insert before this one
 */
TAILQ_INSERT_BEFORE(at, control, next_instrm);
control->on_strm_q = SCTP_ON_UNORDERED;
control->on_strm_q = SCTP_ON_ORDERED;
} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
/*
 * Gak, He sent me a duplicate msg id
 * number?? return -1 to abort.
 */
if (TAILQ_NEXT(at, next_instrm) == NULL) {
/*
 * We are at the end, insert it
 */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, at,
SCTP_STR_LOG_FROM_INSERT_TL);
TAILQ_INSERT_AFTER(q, at, control, next_instrm);
control->on_strm_q = SCTP_ON_UNORDERED;
control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association because reassembly state for 'control' is
 * inconsistent with arriving chunk 'chk': format a protocol-violation
 * diagnostic (I-DATA vs. legacy DATA formats differ), free the chunk,
 * record the abort location ('opspot' — not visible in the format args of
 * this extract; TODO confirm against upstream) and send the ABORT.
 *
 * NOTE(review): this extract is missing lines relative to the upstream
 * file (braces, the 'oper' declaration, some snprintf arguments, setting
 * *abort_flag); the code below is kept byte-identical to the fragment.
 */
sctp_abort_in_reasm(struct sctp_tcb *stcb,
struct sctp_queued_to_read *control,
struct sctp_tmit_chunk *chk,
int *abort_flag, int opspot)
char msg[SCTP_DIAG_INFO_LEN];
if (stcb->asoc.idata_supported) {
/* I-DATA: 32-bit FSN and MID in the diagnostic. */
snprintf(msg, sizeof(msg),
"Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
control->fsn_included,
chk->rec.data.fsn, chk->rec.data.mid);
/* Legacy DATA: 16-bit SSN variant of the same diagnostic. */
snprintf(msg, sizeof(msg),
"Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
control->fsn_included,
(uint16_t)chk->rec.data.mid);
oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
sctp_m_freem(chk->data);
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
464 * The control could not be placed and must be cleaned.
466 struct sctp_tmit_chunk *chk, *nchk;
468 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
469 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
471 sctp_m_freem(chk->data);
473 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
475 sctp_free_remote_addr(control->whoFrom);
477 sctp_m_freem(control->data);
478 control->data = NULL;
480 sctp_free_a_readq(stcb, control);
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the control's entered are non-fragmented.
 *
 * NOTE(review): this extract is missing many lines relative to the
 * upstream file (return type, braces, locals such as 'nxt_todel', 'op_err'
 * and 'queue_needed', else-branches, returns, INVARIANTS conditionals);
 * the code below is kept byte-identical to the fragment.
 */
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
struct sctp_association *asoc,
struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
/*
 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
 * all the data in one stream this could happen quite rapidly. One
 * could use the TSN to keep track of things, but this scheme breaks
 * down in the other type of stream usage that could occur. Send a
 * single msg to stream 0, send 4Billion messages to stream 1, now
 * send a message to stream 0. You have a situation where the TSN
 * has wrapped but not in the stream. Is this worth worrying about
 * or should we just change our queue sort at the bottom to be by
 * TSN.
 *
 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
 * assignment this could happen... and I don't see how this would be
 * a violation. So for now I am undecided an will leave the sort by
 * SSN alone. Maybe a hybred approach is the answer
 */
struct sctp_queued_to_read *at;
struct sctp_stream_in *strm;
char msg[SCTP_DIAG_INFO_LEN];
strm = &asoc->strmin[control->sinfo_stream];
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* A MID behind what we already delivered is a protocol violation. */
if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
/* The incoming sseq is behind where we last delivered? */
SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
strm->last_mid_delivered, control->mid);
/*
 * throw it in the stream so it gets cleaned up in
 * association destruction
 */
TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
if (asoc->idata_supported) {
snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
strm->last_mid_delivered, control->sinfo_tsn,
control->sinfo_stream, control->mid);
snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
(uint16_t)strm->last_mid_delivered,
control->sinfo_stream,
(uint16_t)control->mid);
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Account the message on the stream queues. */
asoc->size_on_all_streams += control->length;
sctp_ucount_incr(asoc->cnt_on_all_streams);
nxt_todel = strm->last_mid_delivered + 1;
if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock-order dance: drop TCB lock before taking socket lock. */
so = SCTP_INP_SO(stcb->sctp_ep);
atomic_add_int(&stcb->asoc.refcnt, 1);
SCTP_TCB_UNLOCK(stcb);
SCTP_SOCKET_LOCK(so, 1);
atomic_subtract_int(&stcb->asoc.refcnt, 1);
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
SCTP_SOCKET_UNLOCK(so, 1);
/* can be delivered right away? */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
/* EY it wont be queued if it could be delivered directly */
if (asoc->size_on_all_streams >= control->length) {
asoc->size_on_all_streams -= control->length;
panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
asoc->size_on_all_streams = 0;
sctp_ucount_decr(asoc->cnt_on_all_streams);
strm->last_mid_delivered++;
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
&stcb->sctp_socket->so_rcv, 1,
SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain any now-in-order, complete messages behind it. */
TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
nxt_todel = strm->last_mid_delivered + 1;
if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
(((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
if (control->on_strm_q == SCTP_ON_ORDERED) {
TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
if (asoc->size_on_all_streams >= control->length) {
asoc->size_on_all_streams -= control->length;
panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
asoc->size_on_all_streams = 0;
sctp_ucount_decr(asoc->cnt_on_all_streams);
panic("Huh control: %p is on_strm_q: %d",
control, control->on_strm_q);
control->on_strm_q = 0;
strm->last_mid_delivered++;
/*
 * We ignore the return of deliver_data here
 * since we always can hold the chunk on the
 * d-queue. And we have a finite number that
 * can be delivered from the strq.
 */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL,
SCTP_STR_LOG_FROM_IMMED_DEL);
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
&stcb->sctp_socket->so_rcv, 1,
SCTP_READ_LOCK_NOT_HELD,
} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
SCTP_SOCKET_UNLOCK(so, 1);
/*
 * Ok, we did not deliver this guy, find the correct place
 * to put it on the queue.
 */
if (sctp_place_control_in_stream(strm, asoc, control)) {
snprintf(msg, sizeof(msg),
"Queue to str MID: %u duplicate",
sctp_clean_up_control(stcb, control);
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
662 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
664 struct mbuf *m, *prev = NULL;
665 struct sctp_tcb *stcb;
667 stcb = control->stcb;
668 control->held_length = 0;
672 if (SCTP_BUF_LEN(m) == 0) {
673 /* Skip mbufs with NO length */
676 control->data = sctp_m_free(m);
679 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
680 m = SCTP_BUF_NEXT(prev);
683 control->tail_mbuf = prev;
688 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
689 if (control->on_read_q) {
691 * On read queue so we must increment the SB stuff,
692 * we assume caller has done any locks of SB.
694 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
696 m = SCTP_BUF_NEXT(m);
699 control->tail_mbuf = prev;
704 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
706 struct mbuf *prev = NULL;
707 struct sctp_tcb *stcb;
709 stcb = control->stcb;
712 panic("Control broken");
717 if (control->tail_mbuf == NULL) {
719 sctp_m_freem(control->data);
721 sctp_setup_tail_pointer(control);
724 control->tail_mbuf->m_next = m;
726 if (SCTP_BUF_LEN(m) == 0) {
727 /* Skip mbufs with NO length */
730 control->tail_mbuf->m_next = sctp_m_free(m);
731 m = control->tail_mbuf->m_next;
733 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
734 m = SCTP_BUF_NEXT(prev);
737 control->tail_mbuf = prev;
742 if (control->on_read_q) {
744 * On read queue so we must increment the SB stuff,
745 * we assume caller has done any locks of SB.
747 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
749 *added += SCTP_BUF_LEN(m);
750 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
751 m = SCTP_BUF_NEXT(m);
754 control->tail_mbuf = prev;
759 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
761 memset(nc, 0, sizeof(struct sctp_queued_to_read));
762 nc->sinfo_stream = control->sinfo_stream;
763 nc->mid = control->mid;
764 TAILQ_INIT(&nc->reasm);
765 nc->top_fsn = control->top_fsn;
766 nc->mid = control->mid;
767 nc->sinfo_flags = control->sinfo_flags;
768 nc->sinfo_ppid = control->sinfo_ppid;
769 nc->sinfo_context = control->sinfo_context;
770 nc->fsn_included = 0xffffffff;
771 nc->sinfo_tsn = control->sinfo_tsn;
772 nc->sinfo_cumtsn = control->sinfo_cumtsn;
773 nc->sinfo_assoc_id = control->sinfo_assoc_id;
774 nc->whoFrom = control->whoFrom;
775 atomic_add_int(&nc->whoFrom->ref_count, 1);
776 nc->stcb = control->stcb;
777 nc->port_from = control->port_from;
781 sctp_reset_a_control(struct sctp_queued_to_read *control,
782 struct sctp_inpcb *inp, uint32_t tsn)
784 control->fsn_included = tsn;
785 if (control->on_read_q) {
787 * We have to purge it from there, hopefully this will work
790 TAILQ_REMOVE(&inp->read_queue, control, next);
791 control->on_read_q = 0;
/*
 * Service the single "old style" (non I-DATA) unordered control on a
 * stream: collapse in-sequence fragments onto it, hand completed data to
 * the read queue, split any leftover fragments onto a fresh control, and
 * start partial delivery (PD-API) once 'pd_point' bytes are buffered.
 *
 * NOTE(review): this extract is missing lines relative to the upstream
 * file (return type, the 'uint32_t pd_point' parameter line, braces,
 * locals such as 'fsn' and 'cnt_added', returns, INVARIANTS
 * conditionals); the code below is kept byte-identical to the fragment.
 */
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
struct sctp_association *asoc,
struct sctp_stream_in *strm,
struct sctp_queued_to_read *control,
int inp_read_lock_held)
/*
 * Special handling for the old un-ordered data chunk. All the
 * chunks/TSN's go to mid 0. So we have to do the old style watching
 * to see if we have it all. If you return one, no other control
 * entries on the un-ordered queue will be looked at. In theory
 * there should be no others entries in reality, unless the guy is
 * sending both unordered NDATA and unordered DATA...
 */
struct sctp_tmit_chunk *chk, *lchk, *tchk;
struct sctp_queued_to_read *nc;
if (control->first_frag_seen == 0) {
/* Nothing we can do, we have not seen the first piece yet */
/* Collapse any we can */
fsn = control->fsn_included + 1;
/* Now what can we add? */
TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
if (chk->rec.data.fsn == fsn) {
/* Next fragment in sequence — fold it into the control. */
sctp_alloc_a_readq(stcb, nc);
memset(nc, 0, sizeof(struct sctp_queued_to_read));
TAILQ_REMOVE(&control->reasm, chk, sctp_next);
sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
if (control->end_added) {
if (!TAILQ_EMPTY(&control->reasm)) {
/*
 * Ok we have to move anything left
 * on the control queue to a new
 * control.
 */
sctp_build_readq_entry_from_ctl(nc, control);
tchk = TAILQ_FIRST(&control->reasm);
if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* Leftover run starts a new message: seed 'nc' with it. */
TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
if (asoc->size_on_reasm_queue >= tchk->send_size) {
asoc->size_on_reasm_queue -= tchk->send_size;
panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
asoc->size_on_reasm_queue = 0;
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
nc->first_frag_seen = 1;
nc->fsn_included = tchk->rec.data.fsn;
nc->data = tchk->data;
nc->sinfo_ppid = tchk->rec.data.ppid;
nc->sinfo_tsn = tchk->rec.data.tsn;
sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
sctp_setup_tail_pointer(nc);
tchk = TAILQ_FIRST(&control->reasm);
/* Spin the rest onto the queue */
TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
tchk = TAILQ_FIRST(&control->reasm);
/*
 * Now lets add it to the queue
 * after removing control
 */
TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
nc->on_strm_q = SCTP_ON_UNORDERED;
if (control->on_strm_q) {
TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
control->on_strm_q = 0;
/* Message is complete — terminate any partial delivery. */
if (control->pdapi_started) {
strm->pd_api_started = 0;
control->pdapi_started = 0;
if (control->on_strm_q) {
TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
control->on_strm_q = 0;
SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
if (control->on_read_q == 0) {
sctp_add_to_readq(stcb->sctp_ep, stcb, control,
&stcb->sctp_socket->so_rcv, control->end_added,
inp_read_lock_held, SCTP_SO_NOT_LOCKED);
sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
/*
 * Switch to the new guy and
 * continue.
 */
if (nc->on_strm_q == 0) {
sctp_free_a_readq(stcb, nc);
/* The scratch entry was never needed — release it. */
sctp_free_a_readq(stcb, nc);
/* Woke data loose during the pass — poke the reader. */
if (cnt_added && strm->pd_api_started) {
sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/* Enough buffered to begin partial delivery? */
if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
strm->pd_api_started = 1;
control->pdapi_started = 1;
sctp_add_to_readq(stcb->sctp_ep, stcb, control,
&stcb->sctp_socket->so_rcv, control->end_added,
inp_read_lock_held, SCTP_SO_NOT_LOCKED);
sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Place an old-style (non I-DATA) unordered fragment 'chk' into the
 * reassembly state of 'control', keeping the reasm list sorted by FSN.
 * FIRST_FRAG chunks may replace the control's current head data (swap)
 * or abort the association on impossible sequences/duplicates.
 *
 * NOTE(review): this extract is missing lines relative to the upstream
 * file (return type, the 'int *abort_flag' parameter, braces, locals such
 * as 'tdata', 'tmp' and 'inserted', returns); the code below is kept
 * byte-identical to the fragment.
 */
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
struct sctp_association *asoc,
struct sctp_queued_to_read *control,
struct sctp_tmit_chunk *chk,
struct sctp_tmit_chunk *at;
/*
 * Here we need to place the chunk into the control structure sorted
 * in the correct order.
 */
if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* Its the very first one. */
SCTPDBG(SCTP_DEBUG_XXX,
"chunk is a first fsn: %u becomes fsn_included\n",
at = TAILQ_FIRST(&control->reasm);
if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
/*
 * The first chunk in the reassembly is a smaller
 * TSN than this one, even though this has a first,
 * it must be from a subsequent msg.
 */
if (control->first_frag_seen) {
/*
 * In old un-ordered we can reassembly on one
 * control multiple messages. As long as the next
 * FIRST is greater then the old first (TSN i.e. FSN
 * wise).
 */
if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
/*
 * Easy way the start of a new guy beyond
 * the lowest.
 */
if ((chk->rec.data.fsn == control->fsn_included) ||
(control->pdapi_started)) {
/*
 * Ok this should not happen, if it does we
 * started the pd-api on the higher TSN
 * (since the equals part is a TSN failure
 * it must be that).
 *
 * We are completly hosed in that case since
 * I have no way to recover. This really
 * will only happen if we can get more TSN's
 * higher before the pd-api-point.
 */
sctp_abort_in_reasm(stcb, control, chk,
SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
/*
 * Ok we have two firsts and the one we just got is
 * smaller than the one we previously placed.. yuck!
 * We must swap them out.
 */
/* swap the mbufs */
tdata = control->data;
control->data = chk->data;
/* Save the lengths */
chk->send_size = control->length;
/* Recompute length of control and tail pointer */
sctp_setup_tail_pointer(control);
/* Fix the FSN included */
tmp = control->fsn_included;
control->fsn_included = chk->rec.data.fsn;
chk->rec.data.fsn = tmp;
/* Fix the TSN included */
tmp = control->sinfo_tsn;
control->sinfo_tsn = chk->rec.data.tsn;
chk->rec.data.tsn = tmp;
/* Fix the PPID included */
tmp = control->sinfo_ppid;
control->sinfo_ppid = chk->rec.data.ppid;
chk->rec.data.ppid = tmp;
/* Fix tail pointer */
/* First FIRST seen on this control: adopt the chunk wholesale. */
control->first_frag_seen = 1;
control->fsn_included = chk->rec.data.fsn;
control->top_fsn = chk->rec.data.fsn;
control->sinfo_tsn = chk->rec.data.tsn;
control->sinfo_ppid = chk->rec.data.ppid;
control->data = chk->data;
sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
sctp_setup_tail_pointer(control);
/* Not a FIRST: insert into the reasm list sorted by FSN. */
TAILQ_FOREACH(at, &control->reasm, sctp_next) {
if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
/*
 * This one in queue is bigger than the new one,
 * insert the new one before at.
 */
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
TAILQ_INSERT_BEFORE(at, chk, sctp_next);
} else if (at->rec.data.fsn == chk->rec.data.fsn) {
/*
 * They sent a duplicate fsn number. This really
 * should not happen since the FSN is a TSN and it
 * should have been dropped earlier.
 */
sctp_abort_in_reasm(stcb, control, chk,
SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
if (inserted == 0) {
/* Its at the end */
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
control->top_fsn = chk->rec.data.fsn;
TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * sctp_deliver_reasm_check() -- walk a stream's reassembly queues (first the
 * un-ordered queue, then the ordered queue) and move any message that has
 * become deliverable onto the socket read queue, starting or finishing a
 * partial-delivery (PD-API) session where the accumulated data is large
 * enough.
 *
 * NOTE(review): this listing is a lossy extraction -- original file line
 * numbers are embedded in every line and some lines (closing braces,
 * #ifdef INVARIANTS guards around panic(), returns) are missing.  The
 * annotations below describe only what the visible lines show.
 */
1080 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1081 struct sctp_stream_in *strm, int inp_read_lock_held)
1084 * Given a stream, strm, see if any of the SSN's on it that are
1085 * fragmented are ready to deliver. If so go ahead and place them on
1086 * the read queue. In so placing if we have hit the end, then we
1087 * need to remove them from the stream's queue.
1089 struct sctp_queued_to_read *control, *nctl = NULL;
1090 uint32_t next_to_del;
/*
 * Partial-delivery threshold: a fraction of the socket receive buffer,
 * capped by the endpoint's configured partial_delivery_point.  Without a
 * socket, fall back to the endpoint setting alone.
 */
1094 if (stcb->sctp_socket) {
1095 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1096 stcb->sctp_ep->partial_delivery_point);
1098 pd_point = stcb->sctp_ep->partial_delivery_point;
/* --- Pass 1: the un-ordered reassembly queue. --- */
1100 control = TAILQ_FIRST(&strm->uno_inqueue);
/* Pre-I-DATA (RFC 4960 DATA) un-ordered fragments need special handling. */
1102 if ((control != NULL) &&
1103 (asoc->idata_supported == 0)) {
1104 /* Special handling needed for "old" data format */
1105 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1109 if (strm->pd_api_started) {
1110 /* Can't add more */
1114 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1115 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1116 nctl = TAILQ_NEXT(control, next_instrm);
1117 if (control->end_added) {
1118 /* We just put the last bit on */
1119 if (control->on_strm_q) {
/* Invariant: entries on the uno_inqueue must be marked SCTP_ON_UNORDERED. */
1121 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1122 panic("Huh control: %p on_q: %d -- not unordered?",
1123 control, control->on_strm_q);
1126 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1127 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1128 control->on_strm_q = 0;
/* Complete message: hand it to the read queue if not already there. */
1130 if (control->on_read_q == 0) {
1131 sctp_add_to_readq(stcb->sctp_ep, stcb,
1133 &stcb->sctp_socket->so_rcv, control->end_added,
1134 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1137 /* Can we do a PD-API for this un-ordered guy? */
1138 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1139 strm->pd_api_started = 1;
1140 control->pdapi_started = 1;
1141 sctp_add_to_readq(stcb->sctp_ep, stcb,
1143 &stcb->sctp_socket->so_rcv, control->end_added,
1144 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
/* --- Pass 2: the ordered reassembly queue. --- */
1152 control = TAILQ_FIRST(&strm->inqueue);
1153 if (strm->pd_api_started) {
1154 /* Can't add more */
1157 if (control == NULL) {
/* Head entry matches the last MID delivered: it was the in-progress PD-API message. */
1160 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1162 * Ok the guy at the top was being partially delivered
1163 * completed, so we remove it. Note the pd_api flag was
1164 * taken off when the chunk was merged on in
1165 * sctp_queue_data_for_reasm below.
1167 nctl = TAILQ_NEXT(control, next_instrm);
1168 SCTPDBG(SCTP_DEBUG_XXX,
1169 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1170 control, control->end_added, control->mid,
1171 control->top_fsn, control->fsn_included,
1172 strm->last_mid_delivered);
1173 if (control->end_added) {
1174 if (control->on_strm_q) {
1176 if (control->on_strm_q != SCTP_ON_ORDERED) {
1177 panic("Huh control: %p on_q: %d -- not ordered?",
1178 control, control->on_strm_q);
1181 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1182 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
/* Guard against unsigned underflow of the per-association byte count. */
1183 if (asoc->size_on_all_streams >= control->length) {
1184 asoc->size_on_all_streams -= control->length;
1187 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1189 asoc->size_on_all_streams = 0;
1192 sctp_ucount_decr(asoc->cnt_on_all_streams);
1193 control->on_strm_q = 0;
/* The PD-API session for this stream is finished. */
1195 if (strm->pd_api_started && control->pdapi_started) {
1196 control->pdapi_started = 0;
1197 strm->pd_api_started = 0;
1199 if (control->on_read_q == 0) {
1200 sctp_add_to_readq(stcb->sctp_ep, stcb,
1202 &stcb->sctp_socket->so_rcv, control->end_added,
1203 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1208 if (strm->pd_api_started) {
1210 * Can't add more must have gotten an un-ordered above being
1211 * partially delivered.
/* Deliver subsequent in-sequence MIDs (last delivered + 1, + 2, ...). */
1216 next_to_del = strm->last_mid_delivered + 1;
1218 SCTPDBG(SCTP_DEBUG_XXX,
1219 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1220 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1222 nctl = TAILQ_NEXT(control, next_instrm);
1223 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1224 (control->first_frag_seen)) {
1227 /* Ok we can deliver it onto the stream. */
1228 if (control->end_added) {
1229 /* We are done with it afterwards */
1230 if (control->on_strm_q) {
1232 if (control->on_strm_q != SCTP_ON_ORDERED) {
1233 panic("Huh control: %p on_q: %d -- not ordered?",
1234 control, control->on_strm_q);
1237 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1238 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1239 if (asoc->size_on_all_streams >= control->length) {
1240 asoc->size_on_all_streams -= control->length;
1243 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1245 asoc->size_on_all_streams = 0;
1248 sctp_ucount_decr(asoc->cnt_on_all_streams);
1249 control->on_strm_q = 0;
/* Un-fragmented message slipping through: mark the TSN non-revokable. */
1253 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1255 * A singleton now slipping through - mark
1256 * it non-revokable too
1258 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1259 } else if (control->end_added == 0) {
1261 * Check if we can defer adding until its
/* Too small for PD-API, or one already in progress: leave it queued. */
1264 if ((control->length < pd_point) || (strm->pd_api_started)) {
1266 * Don't need it or cannot add more
1267 * (one being delivered that way)
1272 done = (control->end_added) && (control->last_frag_seen);
1273 if (control->on_read_q == 0) {
1275 if (asoc->size_on_all_streams >= control->length) {
1276 asoc->size_on_all_streams -= control->length;
1279 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1281 asoc->size_on_all_streams = 0;
/* NOTE(review): PD-API start appears unconditional here; presumably
 * guarded by a dropped "if (!done)" -- confirm against full source. */
1284 strm->pd_api_started = 1;
1285 control->pdapi_started = 1;
1287 sctp_add_to_readq(stcb->sctp_ep, stcb,
1289 &stcb->sctp_socket->so_rcv, control->end_added,
1290 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1292 strm->last_mid_delivered = next_to_del;
/*
 * sctp_add_chk_to_control() -- append the data mbufs of a received fragment
 * (chk) to the partially reassembled message (control), update reassembly
 * accounting, and release the chunk descriptor.
 *
 * Takes the endpoint's INP read lock when the control is already visible on
 * the socket read queue and the caller does not hold it (hold_rlock == 0),
 * since a reader may be consuming the message concurrently.
 *
 * NOTE(review): lossy extraction -- some lines (braces, #ifdef INVARIANTS,
 * the return of the added length) are missing from this listing.
 */
1305 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1306 struct sctp_stream_in *strm,
1307 struct sctp_tcb *stcb, struct sctp_association *asoc,
1308 struct sctp_tmit_chunk *chk, int hold_rlock)
1311 * Given a control and a chunk, merge the data from the chk onto the
1312 * control and free up the chunk resources.
1317 if (control->on_read_q && (hold_rlock == 0)) {
1319 * Its being pd-api'd so we must do some locks.
1321 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First fragment of data: take over chk's mbuf chain; otherwise append. */
1324 if (control->data == NULL) {
1325 control->data = chk->data;
1326 sctp_setup_tail_pointer(control);
1328 sctp_add_to_tail_pointer(control, chk->data, &added);
/* The chunk's data now lives in control: move accounting off the reasm queue. */
1330 control->fsn_included = chk->rec.data.fsn;
1331 asoc->size_on_reasm_queue -= chk->send_size;
1332 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1333 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* FIRST fragment carries the message's TSN and PPID. */
1335 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1336 control->first_frag_seen = 1;
1337 control->sinfo_tsn = chk->rec.data.tsn;
1338 control->sinfo_ppid = chk->rec.data.ppid;
/* LAST fragment: the message is complete; detach it from the stream queue. */
1340 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1342 if ((control->on_strm_q) && (control->on_read_q)) {
1343 if (control->pdapi_started) {
1344 control->pdapi_started = 0;
1345 strm->pd_api_started = 0;
1347 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1349 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1350 control->on_strm_q = 0;
1351 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1353 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1355 * Don't need to decrement
1356 * size_on_all_streams, since control is on
1359 sctp_ucount_decr(asoc->cnt_on_all_streams);
1360 control->on_strm_q = 0;
1362 } else if (control->on_strm_q) {
1363 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1364 control->on_strm_q);
1368 control->end_added = 1;
1369 control->last_frag_seen = 1;
1372 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* Data ownership has transferred to control; free only the descriptor. */
1374 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1379 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1380 * queue, see if anything can be delivered. If so pull it off (or as much as
1381 * we can). If we run out of space then we must dump what we can and set the
1382 * appropriate flag to say we queued what we could.
/*
 * sctp_queue_data_for_reasm() -- place a received fragment into the
 * reassembly structures for its stream, validating fragment-sequence-number
 * (FSN) ordering and aborting the association on protocol violations; then
 * pull any now-in-sequence fragments into the control and, if deliverable,
 * hand the message to the read queue and wake the reader.
 *
 * NOTE(review): lossy extraction -- original line numbers are embedded and
 * some lines (braces, returns, #ifdef INVARIANTS, several "else" lines) are
 * missing.  Annotations describe only what the visible lines show.
 */
1385 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1386 struct sctp_queued_to_read *control,
1387 struct sctp_tmit_chunk *chk,
1388 int created_control,
1389 int *abort_flag, uint32_t tsn)
1392 struct sctp_tmit_chunk *at, *nat;
1393 struct sctp_stream_in *strm;
1394 int do_wakeup, unordered;
1397 strm = &asoc->strmin[control->sinfo_stream];
1399 * For old un-ordered data chunks.
1401 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1406 /* Must be added to the stream-in queue */
1407 if (created_control) {
1408 if (unordered == 0) {
1409 sctp_ucount_incr(asoc->cnt_on_all_streams);
/* Insert the new control in MID order; failure means a duplicate SSN/MID. */
1411 if (sctp_place_control_in_stream(strm, asoc, control)) {
1412 /* Duplicate SSN? */
1413 sctp_abort_in_reasm(stcb, control, chk,
1415 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1416 sctp_clean_up_control(stcb, control);
1419 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1421 * Ok we created this control and now lets validate
1422 * that its legal i.e. there is a B bit set, if not
1423 * and we have up to the cum-ack then its invalid.
1425 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1426 sctp_abort_in_reasm(stcb, control, chk,
1428 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old (non-I-DATA) un-ordered fragments go through the legacy injector. */
1433 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1434 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1438 * Ok we must queue the chunk into the reassembly portion: o if its
1439 * the first it goes to the control mbuf. o if its not first but the
1440 * next in sequence it goes to the control, and each succeeding one
1441 * in order also goes. o if its not in order we place it on the list
1444 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1445 /* Its the very first one. */
1446 SCTPDBG(SCTP_DEBUG_XXX,
1447 "chunk is a first fsn: %u becomes fsn_included\n",
/* Two FIRST fragments for the same message is a sender error: abort. */
1449 if (control->first_frag_seen) {
1451 * Error on senders part, they either sent us two
1452 * data chunks with FIRST, or they sent two
1453 * un-ordered chunks that were fragmented at the
1454 * same time in the same stream.
1456 sctp_abort_in_reasm(stcb, control, chk,
1458 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1461 control->first_frag_seen = 1;
1462 control->sinfo_ppid = chk->rec.data.ppid;
1463 control->sinfo_tsn = chk->rec.data.tsn;
1464 control->fsn_included = chk->rec.data.fsn;
1465 control->data = chk->data;
1466 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1468 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1469 sctp_setup_tail_pointer(control);
1470 asoc->size_on_all_streams += control->length;
1472 /* Place the chunk in our list */
/* Middle/last fragment: validate FSN against what we know so far. */
1475 if (control->last_frag_seen == 0) {
1476 /* Still willing to raise highest FSN seen */
1477 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1478 SCTPDBG(SCTP_DEBUG_XXX,
1479 "We have a new top_fsn: %u\n",
1481 control->top_fsn = chk->rec.data.fsn;
1483 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1484 SCTPDBG(SCTP_DEBUG_XXX,
1485 "The last fsn is now in place fsn: %u\n",
1487 control->last_frag_seen = 1;
/* A LAST fragment below an FSN we already saw is a protocol violation. */
1488 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1489 SCTPDBG(SCTP_DEBUG_XXX,
1490 "New fsn: %u is not at top_fsn: %u -- abort\n",
1493 sctp_abort_in_reasm(stcb, control, chk,
1495 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1499 if (asoc->idata_supported || control->first_frag_seen) {
1501 * For IDATA we always check since we know
1502 * that the first fragment is 0. For old
1503 * DATA we have to receive the first before
1504 * we know the first FSN (which is the TSN).
1506 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1508 * We have already delivered up to
1511 sctp_abort_in_reasm(stcb, control, chk,
1513 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
/* last_frag_seen already set: a second LAST fragment is invalid. */
1518 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1519 /* Second last? huh? */
1520 SCTPDBG(SCTP_DEBUG_XXX,
1521 "Duplicate last fsn: %u (top: %u) -- abort\n",
1522 chk->rec.data.fsn, control->top_fsn);
1523 sctp_abort_in_reasm(stcb, control,
1525 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1528 if (asoc->idata_supported || control->first_frag_seen) {
1530 * For IDATA we always check since we know
1531 * that the first fragment is 0. For old
1532 * DATA we have to receive the first before
1533 * we know the first FSN (which is the TSN).
1536 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1538 * We have already delivered up to
1541 SCTPDBG(SCTP_DEBUG_XXX,
1542 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1543 chk->rec.data.fsn, control->fsn_included);
1544 sctp_abort_in_reasm(stcb, control, chk,
1546 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1551 * validate not beyond top FSN if we have seen last
1554 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1555 SCTPDBG(SCTP_DEBUG_XXX,
1556 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1559 sctp_abort_in_reasm(stcb, control, chk,
1561 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1566 * If we reach here, we need to place the new chunk in the
1567 * reassembly for this control.
1569 SCTPDBG(SCTP_DEBUG_XXX,
1570 "chunk is a not first fsn: %u needs to be inserted\n",
/* Insert into the FSN-sorted reasm list; exact-FSN match is a duplicate. */
1572 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1573 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1575 * This one in queue is bigger than the new
1576 * one, insert the new one before at.
1578 SCTPDBG(SCTP_DEBUG_XXX,
1579 "Insert it before fsn: %u\n",
1581 asoc->size_on_reasm_queue += chk->send_size;
1582 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1583 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1586 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1588 * Gak, He sent me a duplicate str seq
1592 * foo bar, I guess I will just free this
1593 * new guy, should we abort too? FIX ME
1594 * MAYBE? Or it COULD be that the SSN's have
1595 * wrapped. Maybe I should compare to TSN
1596 * somehow... sigh for now just blow away
1599 SCTPDBG(SCTP_DEBUG_XXX,
1600 "Duplicate to fsn: %u -- abort\n",
1602 sctp_abort_in_reasm(stcb, control,
1604 SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1608 if (inserted == 0) {
1609 /* Goes on the end */
1610 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1612 asoc->size_on_reasm_queue += chk->send_size;
1613 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1614 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1618 * Ok lets see if we can suck any up into the control structure that
1619 * are in seq if it makes sense.
1623 * If the first fragment has not been seen there is no sense in
/* Drain fragments that are now contiguous with fsn_included into control. */
1626 if (control->first_frag_seen) {
1627 next_fsn = control->fsn_included + 1;
1628 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1629 if (at->rec.data.fsn == next_fsn) {
1630 /* We can add this one now to the control */
1631 SCTPDBG(SCTP_DEBUG_XXX,
1632 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1635 next_fsn, control->fsn_included);
1636 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1637 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1638 if (control->on_read_q) {
1642 * We only add to the
1643 * size-on-all-streams if its not on
1644 * the read q. The read q flag will
1645 * cause a sballoc so its accounted
1648 asoc->size_on_all_streams += lenadded;
1651 if (control->end_added && control->pdapi_started) {
1652 if (strm->pd_api_started) {
1653 strm->pd_api_started = 0;
1654 control->pdapi_started = 0;
1656 if (control->on_read_q == 0) {
1657 sctp_add_to_readq(stcb->sctp_ep, stcb,
1659 &stcb->sctp_socket->so_rcv, control->end_added,
1660 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1670 /* Need to wakeup the reader */
1671 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * sctp_find_reasm_entry() -- locate an in-progress reassembly entry for a
 * stream.  Ordered messages are looked up by MID on the stream's inqueue.
 * For un-ordered messages, I-DATA matches by MID on the uno_inqueue; old
 * DATA has no per-message un-ordered key, so the head of the uno_inqueue
 * is used.
 *
 * NOTE(review): lossy extraction -- the return statements and several
 * braces are missing from this listing.
 */
static struct sctp_queued_to_read *
1676 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1678 struct sctp_queued_to_read *control;
1681 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1682 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1687 if (idata_supported) {
1688 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1689 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1694 control = TAILQ_FIRST(&strm->uno_inqueue);
1701 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1702 struct mbuf **m, int offset, int chk_length,
1703 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1704 int *break_flag, int last_chunk, uint8_t chk_type)
1706 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1707 uint32_t tsn, fsn, gap, mid;
1710 int need_reasm_check = 0;
1712 struct mbuf *op_err;
1713 char msg[SCTP_DIAG_INFO_LEN];
1714 struct sctp_queued_to_read *control, *ncontrol;
1717 struct sctp_stream_reset_list *liste;
1720 int created_control = 0;
1722 if (chk_type == SCTP_IDATA) {
1723 struct sctp_idata_chunk *chunk, chunk_buf;
1725 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1726 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1727 chk_flags = chunk->ch.chunk_flags;
1728 clen = sizeof(struct sctp_idata_chunk);
1729 tsn = ntohl(chunk->dp.tsn);
1730 sid = ntohs(chunk->dp.sid);
1731 mid = ntohl(chunk->dp.mid);
1732 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1734 ppid = chunk->dp.ppid_fsn.ppid;
1736 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1737 ppid = 0xffffffff; /* Use as an invalid value. */
1740 struct sctp_data_chunk *chunk, chunk_buf;
1742 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1743 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1744 chk_flags = chunk->ch.chunk_flags;
1745 clen = sizeof(struct sctp_data_chunk);
1746 tsn = ntohl(chunk->dp.tsn);
1747 sid = ntohs(chunk->dp.sid);
1748 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1750 ppid = chunk->dp.ppid;
1752 if ((size_t)chk_length == clen) {
1754 * Need to send an abort since we had a empty data chunk.
1756 op_err = sctp_generate_no_user_data_cause(tsn);
1757 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1758 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1762 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1763 asoc->send_sack = 1;
1765 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1766 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1767 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1772 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1773 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1774 /* It is a duplicate */
1775 SCTP_STAT_INCR(sctps_recvdupdata);
1776 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1777 /* Record a dup for the next outbound sack */
1778 asoc->dup_tsns[asoc->numduptsns] = tsn;
1781 asoc->send_sack = 1;
1784 /* Calculate the number of TSN's between the base and this TSN */
1785 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1786 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1787 /* Can't hold the bit in the mapping at max array, toss it */
1790 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1791 SCTP_TCB_LOCK_ASSERT(stcb);
1792 if (sctp_expand_mapping_array(asoc, gap)) {
1793 /* Can't expand, drop it */
1797 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1800 /* See if we have received this one already */
1801 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1802 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1803 SCTP_STAT_INCR(sctps_recvdupdata);
1804 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1805 /* Record a dup for the next outbound sack */
1806 asoc->dup_tsns[asoc->numduptsns] = tsn;
1809 asoc->send_sack = 1;
1813 * Check to see about the GONE flag, duplicates would cause a sack
1814 * to be sent up above
1816 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1817 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1818 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1820 * wait a minute, this guy is gone, there is no longer a
1821 * receiver. Send peer an ABORT!
1823 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1824 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1829 * Now before going further we see if there is room. If NOT then we
1830 * MAY let one through only IF this TSN is the one we are waiting
1831 * for on a partial delivery API.
1834 /* Is the stream valid? */
1835 if (sid >= asoc->streamincnt) {
1836 struct sctp_error_invalid_stream *cause;
1838 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1839 0, M_NOWAIT, 1, MT_DATA);
1840 if (op_err != NULL) {
1841 /* add some space up front so prepend will work well */
1842 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1843 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1845 * Error causes are just param's and this one has
1846 * two back to back phdr, one with the error type
1847 * and size, the other with the streamid and a rsvd
1849 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1850 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1851 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1852 cause->stream_id = htons(sid);
1853 cause->reserved = htons(0);
1854 sctp_queue_op_err(stcb, op_err);
1856 SCTP_STAT_INCR(sctps_badsid);
1857 SCTP_TCB_LOCK_ASSERT(stcb);
1858 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1859 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1860 asoc->highest_tsn_inside_nr_map = tsn;
1862 if (tsn == (asoc->cumulative_tsn + 1)) {
1863 /* Update cum-ack */
1864 asoc->cumulative_tsn = tsn;
1869 * If its a fragmented message, lets see if we can find the control
1870 * on the reassembly queues.
1872 if ((chk_type == SCTP_IDATA) &&
1873 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1876 * The first *must* be fsn 0, and other (middle/end) pieces
1877 * can *not* be fsn 0. XXX: This can happen in case of a
1878 * wrap around. Ignore is for now.
1880 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1884 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1885 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1886 chk_flags, control);
1887 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1888 /* See if we can find the re-assembly entity */
1889 if (control != NULL) {
1890 /* We found something, does it belong? */
1891 if (ordered && (mid != control->mid)) {
1892 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1894 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1895 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1896 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1900 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1902 * We can't have a switched order with an
1905 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1909 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1911 * We can't have a switched unordered with a
1914 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1921 * Its a complete segment. Lets validate we don't have a
1922 * re-assembly going on with the same Stream/Seq (for
1923 * ordered) or in the same Stream for unordered.
1925 if (control != NULL) {
1926 if (ordered || asoc->idata_supported) {
1927 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1929 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1932 if ((tsn == control->fsn_included + 1) &&
1933 (control->end_added == 0)) {
1934 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1942 /* now do the tests */
1943 if (((asoc->cnt_on_all_streams +
1944 asoc->cnt_on_reasm_queue +
1945 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1946 (((int)asoc->my_rwnd) <= 0)) {
1948 * When we have NO room in the rwnd we check to make sure
1949 * the reader is doing its job...
1951 if (stcb->sctp_socket->so_rcv.sb_cc) {
1952 /* some to read, wake-up */
1953 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1956 so = SCTP_INP_SO(stcb->sctp_ep);
1957 atomic_add_int(&stcb->asoc.refcnt, 1);
1958 SCTP_TCB_UNLOCK(stcb);
1959 SCTP_SOCKET_LOCK(so, 1);
1960 SCTP_TCB_LOCK(stcb);
1961 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1962 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1963 /* assoc was freed while we were unlocked */
1964 SCTP_SOCKET_UNLOCK(so, 1);
1968 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1969 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1970 SCTP_SOCKET_UNLOCK(so, 1);
1973 /* now is it in the mapping array of what we have accepted? */
1974 if (chk_type == SCTP_DATA) {
1975 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1976 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1977 /* Nope not in the valid range dump it */
1979 sctp_set_rwnd(stcb, asoc);
1980 if ((asoc->cnt_on_all_streams +
1981 asoc->cnt_on_reasm_queue +
1982 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1983 SCTP_STAT_INCR(sctps_datadropchklmt);
1985 SCTP_STAT_INCR(sctps_datadroprwnd);
1991 if (control == NULL) {
1994 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1999 #ifdef SCTP_ASOCLOG_OF_TSNS
2000 SCTP_TCB_LOCK_ASSERT(stcb);
2001 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
2002 asoc->tsn_in_at = 0;
2003 asoc->tsn_in_wrapped = 1;
2005 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
2006 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
2007 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
2008 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2009 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
2010 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2011 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2012 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2016 * Before we continue lets validate that we are not being fooled by
2017 * an evil attacker. We can only have Nk chunks based on our TSN
2018 * spread allowed by the mapping array N * 8 bits, so there is no
2019 * way our stream sequence numbers could have wrapped. We of course
2020 * only validate the FIRST fragment so the bit must be set.
2022 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2023 (TAILQ_EMPTY(&asoc->resetHead)) &&
2024 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2025 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2026 /* The incoming sseq is behind where we last delivered? */
2027 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2028 mid, asoc->strmin[sid].last_mid_delivered);
2030 if (asoc->idata_supported) {
2031 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2032 asoc->strmin[sid].last_mid_delivered,
2037 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2038 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2043 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2044 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2045 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2049 if (chk_type == SCTP_IDATA) {
2050 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2052 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2054 if (last_chunk == 0) {
2055 if (chk_type == SCTP_IDATA) {
2056 dmbuf = SCTP_M_COPYM(*m,
2057 (offset + sizeof(struct sctp_idata_chunk)),
2060 dmbuf = SCTP_M_COPYM(*m,
2061 (offset + sizeof(struct sctp_data_chunk)),
2064 #ifdef SCTP_MBUF_LOGGING
2065 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2066 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2070 /* We can steal the last chunk */
2074 /* lop off the top part */
2075 if (chk_type == SCTP_IDATA) {
2076 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2078 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2080 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2081 l_len = SCTP_BUF_LEN(dmbuf);
2084 * need to count up the size hopefully does not hit
2090 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2091 l_len += SCTP_BUF_LEN(lat);
2094 if (l_len > the_len) {
2095 /* Trim the end round bytes off too */
2096 m_adj(dmbuf, -(l_len - the_len));
2099 if (dmbuf == NULL) {
2100 SCTP_STAT_INCR(sctps_nomem);
2104 * Now no matter what, we need a control, get one if we don't have
2105 * one (we may have gotten it above when we found the message was
2108 if (control == NULL) {
2109 sctp_alloc_a_readq(stcb, control);
2110 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2115 if (control == NULL) {
2116 SCTP_STAT_INCR(sctps_nomem);
2119 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2122 control->data = dmbuf;
2123 control->tail_mbuf = NULL;
2124 for (mm = control->data; mm; mm = mm->m_next) {
2125 control->length += SCTP_BUF_LEN(mm);
2126 if (SCTP_BUF_NEXT(mm) == NULL) {
2127 control->tail_mbuf = mm;
2130 control->end_added = 1;
2131 control->last_frag_seen = 1;
2132 control->first_frag_seen = 1;
2133 control->fsn_included = fsn;
2134 control->top_fsn = fsn;
2136 created_control = 1;
2138 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2139 chk_flags, ordered, mid, control);
2140 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2141 TAILQ_EMPTY(&asoc->resetHead) &&
2143 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2144 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2145 /* Candidate for express delivery */
2147 * Its not fragmented, No PD-API is up, Nothing in the
2148 * delivery queue, Its un-ordered OR ordered and the next to
2149 * deliver AND nothing else is stuck on the stream queue,
2150 * And there is room for it in the socket buffer. Lets just
2151 * stuff it up the buffer....
2153 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2154 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2155 asoc->highest_tsn_inside_nr_map = tsn;
2157 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2160 sctp_add_to_readq(stcb->sctp_ep, stcb,
2161 control, &stcb->sctp_socket->so_rcv,
2162 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2164 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2165 /* for ordered, bump what we delivered */
2166 asoc->strmin[sid].last_mid_delivered++;
2168 SCTP_STAT_INCR(sctps_recvexpress);
2169 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2170 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2171 SCTP_STR_LOG_FROM_EXPRS_DEL);
2174 goto finish_express_del;
2177 /* Now will we need a chunk too? */
2178 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2179 sctp_alloc_a_chunk(stcb, chk);
2181 /* No memory so we drop the chunk */
2182 SCTP_STAT_INCR(sctps_nomem);
2183 if (last_chunk == 0) {
2184 /* we copied it, free the copy */
2185 sctp_m_freem(dmbuf);
2189 chk->rec.data.tsn = tsn;
2190 chk->no_fr_allowed = 0;
2191 chk->rec.data.fsn = fsn;
2192 chk->rec.data.mid = mid;
2193 chk->rec.data.sid = sid;
2194 chk->rec.data.ppid = ppid;
2195 chk->rec.data.context = stcb->asoc.context;
2196 chk->rec.data.doing_fast_retransmit = 0;
2197 chk->rec.data.rcv_flags = chk_flags;
2199 chk->send_size = the_len;
2201 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2204 atomic_add_int(&net->ref_count, 1);
2207 /* Set the appropriate TSN mark */
2208 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2209 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2210 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2211 asoc->highest_tsn_inside_nr_map = tsn;
2214 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2215 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2216 asoc->highest_tsn_inside_map = tsn;
2219 /* Now is it complete (i.e. not fragmented)? */
2220 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2222 * Special check for when streams are resetting. We could be
2223 * more smart about this and check the actual stream to see
2224 * if it is not being reset.. that way we would not create a
2225 * HOLB when amongst streams being reset and those not being
2229 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2230 SCTP_TSN_GT(tsn, liste->tsn)) {
2232 * yep its past where we need to reset... go ahead
2235 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2237 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2239 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2240 unsigned char inserted = 0;
2242 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2243 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2248 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2253 if (inserted == 0) {
2255 * must be put at end, use prevP
2256 * (all setup from loop) to setup
2259 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2262 goto finish_express_del;
2264 if (chk_flags & SCTP_DATA_UNORDERED) {
2265 /* queue directly into socket buffer */
2266 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2268 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2269 sctp_add_to_readq(stcb->sctp_ep, stcb,
2271 &stcb->sctp_socket->so_rcv, 1,
2272 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2275 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2277 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2285 goto finish_express_del;
2287 /* If we reach here its a reassembly */
2288 need_reasm_check = 1;
2289 SCTPDBG(SCTP_DEBUG_XXX,
2290 "Queue data to stream for reasm control: %p MID: %u\n",
2292 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2295 * the assoc is now gone and chk was put onto the reasm
2296 * queue, which has all been freed.
2304 /* Here we tidy up things */
2305 if (tsn == (asoc->cumulative_tsn + 1)) {
2306 /* Update cum-ack */
2307 asoc->cumulative_tsn = tsn;
2313 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2315 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2317 SCTP_STAT_INCR(sctps_recvdata);
2318 /* Set it present please */
2319 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2320 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2322 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2323 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2324 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2326 if (need_reasm_check) {
2327 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2328 need_reasm_check = 0;
2330 /* check the special flag for stream resets */
2331 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2332 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2334 * we have finished working through the backlogged TSN's now
2335 * time to reset streams. 1: call reset function. 2: free
2336 * pending_reply space 3: distribute any chunks in
2337 * pending_reply_queue.
2339 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2340 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2341 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2342 SCTP_FREE(liste, SCTP_M_STRESET);
2343 /* sa_ignore FREED_MEMORY */
2344 liste = TAILQ_FIRST(&asoc->resetHead);
2345 if (TAILQ_EMPTY(&asoc->resetHead)) {
2346 /* All can be removed */
2347 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2348 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2349 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2353 if (need_reasm_check) {
2354 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2355 need_reasm_check = 0;
2359 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2360 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2364 * if control->sinfo_tsn is <= liste->tsn we
2365 * can process it which is the NOT of
2366 * control->sinfo_tsn > liste->tsn
2368 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2369 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2373 if (need_reasm_check) {
2374 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2375 need_reasm_check = 0;
/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits at the
 * least-significant end of the byte value v (a "count of trailing ones").
 * sctp_slide_mapping_arrays() adds this count to its running bit offset
 * to locate the first 0 bit -- i.e. the first missing TSN -- inside a
 * mapping-array byte.  Examples: tab[0x01] = 1, tab[0x03] = 2,
 * tab[0x07] = 3, ..., tab[0xff] = 8.
 * NOTE(review): the closing "};" of this initializer appears on a line
 * dropped from this excerpt.
 */
2383 static const int8_t sctp_map_lookup_tab[256] = {
2384 0, 1, 0, 2, 0, 1, 0, 3,
2385 0, 1, 0, 2, 0, 1, 0, 4,
2386 0, 1, 0, 2, 0, 1, 0, 3,
2387 0, 1, 0, 2, 0, 1, 0, 5,
2388 0, 1, 0, 2, 0, 1, 0, 3,
2389 0, 1, 0, 2, 0, 1, 0, 4,
2390 0, 1, 0, 2, 0, 1, 0, 3,
2391 0, 1, 0, 2, 0, 1, 0, 6,
2392 0, 1, 0, 2, 0, 1, 0, 3,
2393 0, 1, 0, 2, 0, 1, 0, 4,
2394 0, 1, 0, 2, 0, 1, 0, 3,
2395 0, 1, 0, 2, 0, 1, 0, 5,
2396 0, 1, 0, 2, 0, 1, 0, 3,
2397 0, 1, 0, 2, 0, 1, 0, 4,
2398 0, 1, 0, 2, 0, 1, 0, 3,
2399 0, 1, 0, 2, 0, 1, 0, 7,
2400 0, 1, 0, 2, 0, 1, 0, 3,
2401 0, 1, 0, 2, 0, 1, 0, 4,
2402 0, 1, 0, 2, 0, 1, 0, 3,
2403 0, 1, 0, 2, 0, 1, 0, 5,
2404 0, 1, 0, 2, 0, 1, 0, 3,
2405 0, 1, 0, 2, 0, 1, 0, 4,
2406 0, 1, 0, 2, 0, 1, 0, 3,
2407 0, 1, 0, 2, 0, 1, 0, 6,
2408 0, 1, 0, 2, 0, 1, 0, 3,
2409 0, 1, 0, 2, 0, 1, 0, 4,
2410 0, 1, 0, 2, 0, 1, 0, 3,
2411 0, 1, 0, 2, 0, 1, 0, 5,
2412 0, 1, 0, 2, 0, 1, 0, 3,
2413 0, 1, 0, 2, 0, 1, 0, 4,
2414 0, 1, 0, 2, 0, 1, 0, 3,
2415 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() advances the association's cumulative TSN
 * by scanning the byte-wise OR of mapping_array and nr_mapping_array,
 * then either wipes both arrays (when everything up to the highest TSN
 * is covered) or slides them down by whole bytes so that
 * mapping_array_base_tsn moves forward.
 *
 * NOTE(review): this excerpt has dropped several lines of the original
 * (e.g. the declarations/initialization of 'at', 'val', 'i', 'ii',
 * 'clr', the 'asoc = &stcb->asoc;' assignment, and various braces) --
 * confirm against the full source before editing.
 */
2420 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2423 * Now we also need to check the mapping array in a couple of ways.
2424 * 1) Did we move the cum-ack point?
2426 * When you first glance at this you might think that all entries
2427 * that make up the position of the cum-ack would be in the
2428 * nr-mapping array only.. i.e. things up to the cum-ack are always
2429 * deliverable. Thats true with one exception, when its a fragmented
2430 * message we may not deliver the data until some threshold (or all
2431 * of it) is in place. So we must OR the nr_mapping_array and
2432 * mapping_array to get a true picture of the cum-ack.
2434 struct sctp_association *asoc;
2437 int slide_from, slide_end, lgap, distance;
2438 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for SCTP_MAP_PREPARE_SLIDE logging below. */
2442 old_cumack = asoc->cumulative_tsn;
2443 old_base = asoc->mapping_array_base_tsn;
2444 old_highest = asoc->highest_tsn_inside_map;
2446 * We could probably improve this a small bit by calculating the
2447 * offset of the current cum-ack as the starting point.
/*
 * Scan bytes of (nr_mapping_array | mapping_array); 'at' accumulates the
 * bit offset of the first unset bit, i.e. the first TSN not yet received
 * (sctp_map_lookup_tab counts the trailing 1-bits of a partial byte).
 */
2450 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2451 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2455 /* there is a 0 bit */
2456 at += sctp_map_lookup_tab[val];
2460 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: the new cum-ack must not exceed the highest TSN seen in either map. */
2462 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2463 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2465 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2466 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2468 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2469 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2470 sctp_print_mapping_array(asoc);
2471 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2472 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery: clamp both highest-TSN trackers to the cum-ack. */
2474 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2475 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the renegable and non-renegable map highs. */
2478 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2479 highest_tsn = asoc->highest_tsn_inside_nr_map;
2481 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything through highest_tsn is acked -- clear both maps
 * and restart the base at cumulative_tsn + 1.
 */
2483 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2484 /* The complete array was completed by a single FR */
2485 /* highest becomes the cum-ack */
2491 /* clear the array */
2492 clr = ((at + 7) >> 3);
2493 if (clr > asoc->mapping_array_size) {
2494 clr = asoc->mapping_array_size;
2496 memset(asoc->mapping_array, 0, clr);
2497 memset(asoc->nr_mapping_array, 0, clr);
/* Debug pass: both arrays must be all-zero after the clear. */
2499 for (i = 0; i < asoc->mapping_array_size; i++) {
2500 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2501 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2502 sctp_print_mapping_array(asoc);
2506 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2507 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Case 2: at least one whole leading byte is all 1s -- slide both
 * arrays down by slide_from bytes.
 */
2508 } else if (at >= 8) {
2509 /* we can slide the mapping array down */
2510 /* slide_from holds where we hit the first NON 0xff byte */
2513 * now calculate the ceiling of the move using our highest
2516 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2517 slide_end = (lgap >> 3);
2518 if (slide_end < slide_from) {
2519 sctp_print_mapping_array(asoc);
2521 panic("impossible slide");
2523 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2524 lgap, slide_end, slide_from, at);
2528 if (slide_end > asoc->mapping_array_size) {
2530 panic("would overrun buffer");
2532 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2533 asoc->mapping_array_size, slide_end);
2534 slide_end = asoc->mapping_array_size;
2537 distance = (slide_end - slide_from) + 1;
2538 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2539 sctp_log_map(old_base, old_cumack, old_highest,
2540 SCTP_MAP_PREPARE_SLIDE);
2541 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2542 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
/*
 * If the move would run past the end of the arrays, do NOT slide;
 * wait for a later call when more data has arrived.
 */
2544 if (distance + slide_from > asoc->mapping_array_size ||
2547 * Here we do NOT slide forward the array so that
2548 * hopefully when more data comes in to fill it up
2549 * we will be able to slide it forward. Really I
2550 * don't think this should happen :-0
2553 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2554 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2555 (uint32_t)asoc->mapping_array_size,
2556 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down, then zero the vacated tail of both arrays. */
2561 for (ii = 0; ii < distance; ii++) {
2562 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2563 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2566 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2567 asoc->mapping_array[ii] = 0;
2568 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "empty map" invariant (highest == base - 1) intact across
 * the base-TSN advance of slide_from * 8.
 */
2570 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2571 asoc->highest_tsn_inside_map += (slide_from << 3);
2573 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2574 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2576 asoc->mapping_array_base_tsn += (slide_from << 3);
2577 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2578 sctp_log_map(asoc->mapping_array_base_tsn,
2579 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2580 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() runs after incoming DATA has been processed: it
 * slides the mapping arrays, then decides whether to send a SACK right
 * now, start (or leave running) the delayed-ack timer, or -- in the
 * SHUTDOWN-SENT state -- stop the SACK timer and send a SHUTDOWN plus
 * an immediate SACK.  'was_a_gap' tells us whether a gap existed before
 * this packet was processed.
 * NOTE(review): some lines (braces, 'else' lines, the 'asoc' and
 * 'is_a_gap' setup) are missing from this excerpt.
 */
2587 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2589 struct sctp_association *asoc;
2590 uint32_t highest_tsn;
2593 sctp_slide_mapping_arrays(stcb);
2595 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2596 highest_tsn = asoc->highest_tsn_inside_nr_map;
2598 highest_tsn = asoc->highest_tsn_inside_map;
2600 /* Is there a gap now? */
2601 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2604 * Now we need to see if we need to queue a sack or just start the
2605 * timer (if allowed).
2607 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2609 * Ok special case, in SHUTDOWN-SENT case. here we maker
2610 * sure SACK timer is off and instead send a SHUTDOWN and a
2613 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2614 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2615 stcb->sctp_ep, stcb, NULL,
2616 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2618 sctp_send_shutdown(stcb,
2619 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2621 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2625 * CMT DAC algorithm: increase number of packets received
2628 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any of these conditions forces a SACK (or a DAC-delayed ack) now. */
2630 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2632 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2634 (stcb->asoc.numduptsns) || /* we have dup's */
2635 (is_a_gap) || /* is still a gap */
2636 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2637 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2640 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2641 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2642 (stcb->asoc.send_sack == 0) &&
2643 (stcb->asoc.numduptsns == 0) &&
2644 (stcb->asoc.delayed_ack) &&
2645 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2648 * CMT DAC algorithm: With CMT, delay acks
2649 * even in the face of
2651 * reordering. Therefore, if acks that do
2652 * not have to be sent because of the above
2653 * reasons, will be delayed. That is, acks
2654 * that would have been sent due to gap
2655 * reports will be delayed with DAC. Start
2656 * the delayed ack timer.
2658 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2659 stcb->sctp_ep, stcb, NULL);
2662 * Ok we must build a SACK since the timer
2663 * is pending, we got our first packet OR
2664 * there are gaps or duplicates.
2666 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2667 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise just (re)arm the delayed-ack timer if it isn't running. */
2670 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2671 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2672 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_process_data() walks every chunk in the DATA region of a received
 * packet.  It validates each chunk's length, rejects DATA vs I-DATA
 * mismatches against what was negotiated (aborting the association with
 * a protocol-violation cause), hands valid DATA/I-DATA chunks to
 * sctp_process_a_data_chunk(), queues error reports for unknown chunk
 * types per the upper-two-bit rules, and finally updates liveness
 * statistics and calls sctp_sack_check() to schedule acknowledgement.
 *
 * NOTE(review): numerous lines (braces, 'else' branches, several local
 * initializations such as 'm', 'stop_proc', 'clen', 'last_chunk') are
 * missing from this excerpt; verify against the full source.
 */
2679 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2680 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2681 struct sctp_nets *net, uint32_t *high_tsn)
2683 struct sctp_chunkhdr *ch, chunk_buf;
2684 struct sctp_association *asoc;
2685 int num_chunks = 0; /* number of control chunks processed */
2687 int break_flag, last_chunk;
2688 int abort_flag = 0, was_a_gap;
2690 uint32_t highest_tsn;
2691 uint16_t chk_length;
2694 sctp_set_rwnd(stcb, &stcb->asoc);
2697 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed BEFORE this packet, for sctp_sack_check(). */
2699 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2700 highest_tsn = asoc->highest_tsn_inside_nr_map;
2702 highest_tsn = asoc->highest_tsn_inside_map;
2704 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2706 * setup where we got the last DATA packet from for any SACK that
2707 * may need to go out. Don't bump the net. This is done ONLY when a
2708 * chunk is assigned.
2710 asoc->last_data_chunk_from = net;
2713 * Now before we proceed we must figure out if this is a wasted
2714 * cluster... i.e. it is a small packet sent in and yet the driver
2715 * underneath allocated a full cluster for it. If so we must copy it
2716 * to a smaller mbuf and free up the cluster mbuf. This will help
2717 * with cluster starvation. Note for __Panda__ we don't do this
2718 * since it has clusters all the way down to 64 bytes.
2720 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2721 /* we only handle mbufs that are singletons.. not chains */
2722 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2724 /* ok lets see if we can copy the data up */
2727 /* get the pointers and copy */
2728 to = mtod(m, caddr_t *);
2729 from = mtod((*mm), caddr_t *);
2730 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2731 /* copy the length and free up the old */
2732 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2734 /* success, back copy */
2737 /* We are in trouble in the mbuf world .. yikes */
2741 /* get pointer to the first chunk header */
2742 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2743 sizeof(struct sctp_chunkhdr),
2744 (uint8_t *)&chunk_buf);
2749 * process all DATA chunks...
2751 *high_tsn = asoc->cumulative_tsn;
2753 asoc->data_pkts_seen++;
2754 while (stop_proc == 0) {
2755 /* validate chunk length */
2756 chk_length = ntohs(ch->chunk_length);
2757 if (length - *offset < chk_length) {
2758 /* all done, mutulated chunk */
/*
 * Reject a DATA/I-DATA chunk that contradicts what was negotiated
 * at association setup -- this aborts the association.
 */
2762 if ((asoc->idata_supported == 1) &&
2763 (ch->chunk_type == SCTP_DATA)) {
2764 struct mbuf *op_err;
2765 char msg[SCTP_DIAG_INFO_LEN];
2767 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2768 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2769 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2770 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2773 if ((asoc->idata_supported == 0) &&
2774 (ch->chunk_type == SCTP_IDATA)) {
2775 struct mbuf *op_err;
2776 char msg[SCTP_DIAG_INFO_LEN];
2778 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2779 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2780 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2781 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2784 if ((ch->chunk_type == SCTP_DATA) ||
2785 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum length differs between DATA and I-DATA chunk headers. */
2788 if (ch->chunk_type == SCTP_DATA) {
2789 clen = sizeof(struct sctp_data_chunk);
2791 clen = sizeof(struct sctp_idata_chunk);
2793 if (chk_length < clen) {
2795 * Need to send an abort since we had a
2796 * invalid data chunk.
2798 struct mbuf *op_err;
2799 char msg[SCTP_DIAG_INFO_LEN];
2801 snprintf(msg, sizeof(msg), "%s chunk of length %u",
2802 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2804 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2805 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2806 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2809 #ifdef SCTP_AUDITING_ENABLED
2810 sctp_audit_log(0xB1, 0);
2812 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2817 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2818 chk_length, net, high_tsn, &abort_flag, &break_flag,
2819 last_chunk, ch->chunk_type)) {
2827 * Set because of out of rwnd space and no
2828 * drop rep space left.
2834 /* not a data chunk in the data region */
2835 switch (ch->chunk_type) {
2836 case SCTP_INITIATION:
2837 case SCTP_INITIATION_ACK:
2838 case SCTP_SELECTIVE_ACK:
2839 case SCTP_NR_SELECTIVE_ACK:
2840 case SCTP_HEARTBEAT_REQUEST:
2841 case SCTP_HEARTBEAT_ACK:
2842 case SCTP_ABORT_ASSOCIATION:
2844 case SCTP_SHUTDOWN_ACK:
2845 case SCTP_OPERATION_ERROR:
2846 case SCTP_COOKIE_ECHO:
2847 case SCTP_COOKIE_ACK:
2850 case SCTP_SHUTDOWN_COMPLETE:
2851 case SCTP_AUTHENTICATION:
2852 case SCTP_ASCONF_ACK:
2853 case SCTP_PACKET_DROPPED:
2854 case SCTP_STREAM_RESET:
2855 case SCTP_FORWARD_CUM_TSN:
2859 * Now, what do we do with KNOWN
2860 * chunks that are NOT in the right
2863 * For now, I do nothing but ignore
2864 * them. We may later want to add
2865 * sysctl stuff to switch out and do
2866 * either an ABORT() or possibly
2869 struct mbuf *op_err;
2870 char msg[SCTP_DIAG_INFO_LEN];
2872 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2874 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2875 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2880 * Unknown chunk type: use bit rules after
2883 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2885 * Need to send an abort since we
2886 * had a invalid chunk.
2888 struct mbuf *op_err;
2889 char msg[SCTP_DIAG_INFO_LEN];
2891 snprintf(msg, sizeof(msg), "Chunk of length %u",
2893 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2894 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2895 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Bit 0x40 of the chunk type: report unrecognized chunk to the peer. */
2898 if (ch->chunk_type & 0x40) {
2899 /* Add a error report to the queue */
2900 struct mbuf *op_err;
2901 struct sctp_gen_error_cause *cause;
2903 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2904 0, M_NOWAIT, 1, MT_DATA);
2905 if (op_err != NULL) {
2906 cause = mtod(op_err, struct sctp_gen_error_cause *);
2907 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2908 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2909 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2910 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2911 if (SCTP_BUF_NEXT(op_err) != NULL) {
2912 sctp_queue_op_err(stcb, op_err);
2914 sctp_m_freem(op_err);
/* Bit 0x80 clear: stop processing the rest of the packet. */
2918 if ((ch->chunk_type & 0x80) == 0) {
2919 /* discard the rest of this packet */
2921 } /* else skip this bad chunk and
2924 } /* switch of chunk type */
2926 *offset += SCTP_SIZE32(chk_length);
2927 if ((*offset >= length) || stop_proc) {
2928 /* no more data left in the mbuf chain */
2932 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2933 sizeof(struct sctp_chunkhdr),
2934 (uint8_t *)&chunk_buf);
2943 * we need to report rwnd overrun drops.
2945 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2949 * Did we get data, if so update the time for auto-close and
2950 * give peer credit for being alive.
2952 SCTP_STAT_INCR(sctps_recvpktwithdata);
2953 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2954 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2955 stcb->asoc.overall_error_count,
2957 SCTP_FROM_SCTP_INDATA,
2960 stcb->asoc.overall_error_count = 0;
2961 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2963 /* now service all of the reassm queue if needed */
2964 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2965 /* Assure that we ack right away */
2966 stcb->asoc.send_sack = 1;
2968 /* Start a sack timer or QUEUE a SACK for sending */
2969 sctp_sack_check(stcb, was_a_gap)
/*
 * sctp_process_segment_range() processes one gap-ack fragment of a
 * SACK/NR-SACK: it walks the sent_queue for every TSN in
 * [last_tsn + frag_strt, last_tsn + frag_end], marking newly covered
 * chunks (MARKED, or NR_ACKED when nr_sacking), decreasing flight size,
 * feeding RTT samples to sctp_calculate_rto(), and maintaining the CMT
 * CUCv2 pseudo-cumack / SFR / DAC bookkeeping per destination.
 * *p_tp1 carries the scan position between fragments; the return value
 * (wake_him) is only used by the nr-sack path.
 * NOTE(review): lines are missing from this excerpt (braces, 'else'
 * branches, parts of the RTO and free paths); verify before editing.
 */
2974 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2975 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2977 uint32_t *biggest_newly_acked_tsn,
2978 uint32_t *this_sack_lowest_newack,
2981 struct sctp_tmit_chunk *tp1;
2982 unsigned int theTSN;
2983 int j, wake_him = 0, circled = 0;
2985 /* Recover the tp1 we last saw */
2988 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2990 for (j = frag_strt; j <= frag_end; j++) {
2991 theTSN = j + last_tsn;
2993 if (tp1->rec.data.doing_fast_retransmit)
2997 * CMT: CUCv2 algorithm. For each TSN being
2998 * processed from the sent queue, track the
2999 * next expected pseudo-cumack, or
3000 * rtx_pseudo_cumack, if required. Separate
3001 * cumack trackers for first transmissions,
3002 * and retransmissions.
3004 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3005 (tp1->whoTo->find_pseudo_cumack == 1) &&
3006 (tp1->snd_count == 1)) {
3007 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
3008 tp1->whoTo->find_pseudo_cumack = 0;
3010 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3011 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3012 (tp1->snd_count > 1)) {
3013 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3014 tp1->whoTo->find_rtx_pseudo_cumack = 0;
3016 if (tp1->rec.data.tsn == theTSN) {
3017 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3019 * must be held until
3022 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3024 * If it is less than RESEND, it is
3025 * now no-longer in flight.
3026 * Higher values may already be set
3027 * via previous Gap Ack Blocks...
3028 * i.e. ACKED or RESEND.
3030 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3031 *biggest_newly_acked_tsn)) {
3032 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3035 * CMT: SFR algo (and HTNA) - set
3036 * saw_newack to 1 for dest being
3037 * newly acked. update
3038 * this_sack_highest_newack if
3041 if (tp1->rec.data.chunk_was_revoked == 0)
3042 tp1->whoTo->saw_newack = 1;
3044 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3045 tp1->whoTo->this_sack_highest_newack)) {
3046 tp1->whoTo->this_sack_highest_newack =
3050 * CMT DAC algo: also update
3051 * this_sack_lowest_newack
3053 if (*this_sack_lowest_newack == 0) {
3054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3055 sctp_log_sack(*this_sack_lowest_newack,
3060 SCTP_LOG_TSN_ACKED);
3062 *this_sack_lowest_newack = tp1->rec.data.tsn;
3065 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3066 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3067 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3068 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3069 * Separate pseudo_cumack trackers for first transmissions and
3072 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3073 if (tp1->rec.data.chunk_was_revoked == 0) {
3074 tp1->whoTo->new_pseudo_cumack = 1;
3076 tp1->whoTo->find_pseudo_cumack = 1;
3078 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3079 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3081 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3082 if (tp1->rec.data.chunk_was_revoked == 0) {
3083 tp1->whoTo->new_pseudo_cumack = 1;
3085 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3088 sctp_log_sack(*biggest_newly_acked_tsn,
3093 SCTP_LOG_TSN_ACKED);
3095 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3096 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3097 tp1->whoTo->flight_size,
3099 (uint32_t)(uintptr_t)tp1->whoTo,
/* Chunk leaves flight: shrink per-net and total flight accounting. */
3102 sctp_flight_size_decrease(tp1);
3103 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3104 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3107 sctp_total_flight_decrease(stcb, tp1);
3109 tp1->whoTo->net_ack += tp1->send_size;
3110 if (tp1->snd_count < 2) {
3112 * True non-retransmitted chunk
3114 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample from a gap-acked chunk (Karn's rule handled by caller state). */
3121 sctp_calculate_rto(stcb,
3124 &tp1->sent_rcv_time,
3125 SCTP_RTT_FROM_DATA)) {
3128 if (tp1->whoTo->rto_needed == 0) {
3129 tp1->whoTo->rto_needed = 1;
3136 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3137 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3138 stcb->asoc.this_sack_highest_gap)) {
3139 stcb->asoc.this_sack_highest_gap =
3142 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3143 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3144 #ifdef SCTP_AUDITING_ENABLED
3145 sctp_audit_log(0xB2,
3146 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3151 * All chunks NOT UNSENT fall through here and are marked
3152 * (leave PR-SCTP ones that are to skip alone though)
3154 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3155 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3156 tp1->sent = SCTP_DATAGRAM_MARKED;
3158 if (tp1->rec.data.chunk_was_revoked) {
3159 /* deflate the cwnd */
3160 tp1->whoTo->cwnd -= tp1->book_size;
3161 tp1->rec.data.chunk_was_revoked = 0;
3163 /* NR Sack code here */
3165 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3166 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3167 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3170 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* A pending stream reset can fire once this stream drains completely. */
3173 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3174 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3175 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3176 stcb->asoc.trigger_reset = 1;
3178 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* NR-acked data can never be renegged: release the data now. */
3184 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3185 sctp_m_freem(tp1->data);
3192 } /* if (tp1->tsn == theTSN) */
3193 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3196 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* Wrap the scan once in case fragments arrived out of order. */
3197 if ((tp1 == NULL) && (circled == 0)) {
3199 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3201 } /* end while (tp1) */
3204 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3206 /* In case the fragments were not in order we must reset */
3207 } /* end for (j = fragStart */
3209 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() iterates the num_seg renegable plus num_nr_seg
 * non-renegable gap-ack blocks of a SACK/NR-SACK chunk at *offset in
 * mbuf m.  Each block is pulled with sctp_m_getptr(), validated
 * (start <= end; out-of-order blocks restart the sent_queue scan),
 * folded into *biggest_tsn_acked, and handed to
 * sctp_process_segment_range().  Returns nonzero if a chunk was freed
 * (NR-sack), so the caller can wake the sending socket.
 * NOTE(review): lines are missing from this excerpt (e.g. the NULL
 * check after sctp_m_getptr(), 'non_revocable' selection, loop braces).
 */
3214 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3215 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3216 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3217 int num_seg, int num_nr_seg, int *rto_ok)
3219 struct sctp_gap_ack_block *frag, block;
3220 struct sctp_tmit_chunk *tp1;
3225 uint16_t frag_strt, frag_end, prev_frag_end;
3227 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3231 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3234 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Copy the next gap-ack block out of the mbuf chain into 'block'. */
3236 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3237 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3238 *offset += sizeof(block);
3240 return (chunk_freed);
3242 frag_strt = ntohs(frag->start);
3243 frag_end = ntohs(frag->end);
3245 if (frag_strt > frag_end) {
3246 /* This gap report is malformed, skip it. */
3249 if (frag_strt <= prev_frag_end) {
3250 /* This gap report is not in order, so restart. */
3251 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Track the highest TSN covered by any block (offsets are cum-ack relative). */
3253 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3254 *biggest_tsn_acked = last_tsn + frag_end;
3261 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3262 non_revocable, &num_frs, biggest_newly_acked_tsn,
3263 this_sack_lowest_newack, rto_ok)) {
3266 prev_frag_end = frag_end;
3268 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3270 sctp_log_fr(*biggest_tsn_acked,
3271 *biggest_newly_acked_tsn,
3272 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3274 return (chunk_freed);
/*
 * sctp_check_for_revoked() scans the sent_queue above the cumulative
 * ack point after processing a SACK's gap reports.  A chunk still in
 * SCTP_DATAGRAM_ACKED that was NOT covered this time has been revoked
 * by the peer: it goes back to SENT, is flagged chunk_was_revoked, and
 * its flight size and cwnd are re-inflated so timers/retransmission
 * accounting restart correctly.  A MARKED chunk (covered again this
 * SACK) is promoted to ACKED.  TSNs above biggest_tsn_acked are beyond
 * this SACK's coverage and end the scan of interest.
 * NOTE(review): braces and a few argument lines of the logging calls
 * are missing from this excerpt.
 */
3278 sctp_check_for_revoked(struct sctp_tcb *stcb,
3279 struct sctp_association *asoc, uint32_t cumack,
3280 uint32_t biggest_tsn_acked)
3282 struct sctp_tmit_chunk *tp1;
3284 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3285 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3287 * ok this guy is either ACK or MARKED. If it is
3288 * ACKED it has been previously acked but not this
3289 * time i.e. revoked. If it is MARKED it was ACK'ed
3292 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3295 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3296 /* it has been revoked */
3297 tp1->sent = SCTP_DATAGRAM_SENT;
3298 tp1->rec.data.chunk_was_revoked = 1;
3300 * We must add this stuff back in to assure
3301 * timers and such get started.
3303 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3304 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3305 tp1->whoTo->flight_size,
3307 (uint32_t)(uintptr_t)tp1->whoTo,
3310 sctp_flight_size_increase(tp1);
3311 sctp_total_flight_increase(stcb, tp1);
3313 * We inflate the cwnd to compensate for our
3314 * artificial inflation of the flight_size.
3316 tp1->whoTo->cwnd += tp1->book_size;
3317 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3318 sctp_log_sack(asoc->last_acked_seq,
3323 SCTP_LOG_TSN_REVOKED);
3325 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3326 /* it has been re-acked in this SACK */
3327 tp1->sent = SCTP_DATAGRAM_ACKED;
3330 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3337 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3338 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3340 struct sctp_tmit_chunk *tp1;
3341 int strike_flag = 0;
3343 int tot_retrans = 0;
3344 uint32_t sending_seq;
3345 struct sctp_nets *net;
3346 int num_dests_sacked = 0;
3349 * select the sending_seq, this is either the next thing ready to be
3350 * sent but not transmitted, OR, the next seq we assign.
3352 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3354 sending_seq = asoc->sending_seq;
3356 sending_seq = tp1->rec.data.tsn;
3359 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3360 if ((asoc->sctp_cmt_on_off > 0) &&
3361 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3362 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3363 if (net->saw_newack)
3367 if (stcb->asoc.prsctp_supported) {
3368 (void)SCTP_GETTIME_TIMEVAL(&now);
3370 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3372 if (tp1->no_fr_allowed) {
3373 /* this one had a timeout or something */
3376 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3377 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3378 sctp_log_fr(biggest_tsn_newly_acked,
3381 SCTP_FR_LOG_CHECK_STRIKE);
3383 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3384 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3388 if (stcb->asoc.prsctp_supported) {
3389 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3390 /* Is it expired? */
3391 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3392 /* Yes so drop it */
3393 if (tp1->data != NULL) {
3394 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3395 SCTP_SO_NOT_LOCKED);
3402 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3403 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3404 /* we are beyond the tsn in the sack */
3407 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3408 /* either a RESEND, ACKED, or MARKED */
3410 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3411 /* Continue strikin FWD-TSN chunks */
3412 tp1->rec.data.fwd_tsn_cnt++;
3417 * CMT : SFR algo (covers part of DAC and HTNA as well)
3419 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3421 * No new acks were receieved for data sent to this
3422 * dest. Therefore, according to the SFR algo for
3423 * CMT, no data sent to this dest can be marked for
3424 * FR using this SACK.
3427 } else if (tp1->whoTo &&
3428 SCTP_TSN_GT(tp1->rec.data.tsn,
3429 tp1->whoTo->this_sack_highest_newack) &&
3430 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3432 * CMT: New acks were receieved for data sent to
3433 * this dest. But no new acks were seen for data
3434 * sent after tp1. Therefore, according to the SFR
3435 * algo for CMT, tp1 cannot be marked for FR using
3436 * this SACK. This step covers part of the DAC algo
3437 * and the HTNA algo as well.
3442 * Here we check to see if we were have already done a FR
3443 * and if so we see if the biggest TSN we saw in the sack is
3444 * smaller than the recovery point. If so we don't strike
3445 * the tsn... otherwise we CAN strike the TSN.
3448 * @@@ JRI: Check for CMT if (accum_moved &&
3449 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3452 if (accum_moved && asoc->fast_retran_loss_recovery) {
3454 * Strike the TSN if in fast-recovery and cum-ack
3457 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3458 sctp_log_fr(biggest_tsn_newly_acked,
3461 SCTP_FR_LOG_STRIKE_CHUNK);
3463 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3466 if ((asoc->sctp_cmt_on_off > 0) &&
3467 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3469 * CMT DAC algorithm: If SACK flag is set to
3470 * 0, then lowest_newack test will not pass
3471 * because it would have been set to the
3472 * cumack earlier. If not already to be
3473 * rtx'd, If not a mixed sack and if tp1 is
3474 * not between two sacked TSNs, then mark by
3475 * one more. NOTE that we are marking by one
3476 * additional time since the SACK DAC flag
3477 * indicates that two packets have been
3478 * received after this missing TSN.
3480 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3481 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3482 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3483 sctp_log_fr(16 + num_dests_sacked,
3486 SCTP_FR_LOG_STRIKE_CHUNK);
3491 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3492 (asoc->sctp_cmt_on_off == 0)) {
3494 * For those that have done a FR we must take
3495 * special consideration if we strike. I.e the
3496 * biggest_newly_acked must be higher than the
3497 * sending_seq at the time we did the FR.
3500 #ifdef SCTP_FR_TO_ALTERNATE
3502 * If FR's go to new networks, then we must only do
3503 * this for singly homed asoc's. However if the FR's
3504 * go to the same network (Armando's work) then its
3505 * ok to FR multiple times.
3513 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3514 tp1->rec.data.fast_retran_tsn)) {
3516 * Strike the TSN, since this ack is
3517 * beyond where things were when we
3520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3521 sctp_log_fr(biggest_tsn_newly_acked,
3524 SCTP_FR_LOG_STRIKE_CHUNK);
3526 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3530 if ((asoc->sctp_cmt_on_off > 0) &&
3531 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3533 * CMT DAC algorithm: If
3534 * SACK flag is set to 0,
3535 * then lowest_newack test
3536 * will not pass because it
3537 * would have been set to
3538 * the cumack earlier. If
3539 * not already to be rtx'd,
3540 * If not a mixed sack and
3541 * if tp1 is not between two
3542 * sacked TSNs, then mark by
3543 * one more. NOTE that we
3544 * are marking by one
3545 * additional time since the
3546 * SACK DAC flag indicates
3547 * that two packets have
3548 * been received after this
3551 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3552 (num_dests_sacked == 1) &&
3553 SCTP_TSN_GT(this_sack_lowest_newack,
3554 tp1->rec.data.tsn)) {
3555 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3556 sctp_log_fr(32 + num_dests_sacked,
3559 SCTP_FR_LOG_STRIKE_CHUNK);
3561 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3569 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3572 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3573 biggest_tsn_newly_acked)) {
3575 * We don't strike these: This is the HTNA
3576 * algorithm i.e. we don't strike If our TSN is
3577 * larger than the Highest TSN Newly Acked.
3581 /* Strike the TSN */
3582 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3583 sctp_log_fr(biggest_tsn_newly_acked,
3586 SCTP_FR_LOG_STRIKE_CHUNK);
3588 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3591 if ((asoc->sctp_cmt_on_off > 0) &&
3592 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3594 * CMT DAC algorithm: If SACK flag is set to
3595 * 0, then lowest_newack test will not pass
3596 * because it would have been set to the
3597 * cumack earlier. If not already to be
3598 * rtx'd, If not a mixed sack and if tp1 is
3599 * not between two sacked TSNs, then mark by
3600 * one more. NOTE that we are marking by one
3601 * additional time since the SACK DAC flag
3602 * indicates that two packets have been
3603 * received after this missing TSN.
3605 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3606 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3608 sctp_log_fr(48 + num_dests_sacked,
3611 SCTP_FR_LOG_STRIKE_CHUNK);
3617 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3618 struct sctp_nets *alt;
3620 /* fix counts and things */
3621 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3622 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3623 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3625 (uint32_t)(uintptr_t)tp1->whoTo,
3629 tp1->whoTo->net_ack++;
3630 sctp_flight_size_decrease(tp1);
3631 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3632 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3637 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3638 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3639 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3641 /* add back to the rwnd */
3642 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3644 /* remove from the total flight */
3645 sctp_total_flight_decrease(stcb, tp1);
3647 if ((stcb->asoc.prsctp_supported) &&
3648 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3650 * Has it been retransmitted tv_sec times? -
3651 * we store the retran count there.
3653 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3654 /* Yes, so drop it */
3655 if (tp1->data != NULL) {
3656 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3657 SCTP_SO_NOT_LOCKED);
3659 /* Make sure to flag we had a FR */
3660 if (tp1->whoTo != NULL) {
3661 tp1->whoTo->net_ack++;
3667 * SCTP_PRINTF("OK, we are now ready to FR this
3670 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3671 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3675 /* This is a subsequent FR */
3676 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3678 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3679 if (asoc->sctp_cmt_on_off > 0) {
3681 * CMT: Using RTX_SSTHRESH policy for CMT.
3682 * If CMT is being used, then pick dest with
3683 * largest ssthresh for any retransmission.
3685 tp1->no_fr_allowed = 1;
3687 /* sa_ignore NO_NULL_CHK */
3688 if (asoc->sctp_cmt_pf > 0) {
3690 * JRS 5/18/07 - If CMT PF is on,
3691 * use the PF version of
3694 alt = sctp_find_alternate_net(stcb, alt, 2);
3697 * JRS 5/18/07 - If only CMT is on,
3698 * use the CMT version of
3701 /* sa_ignore NO_NULL_CHK */
3702 alt = sctp_find_alternate_net(stcb, alt, 1);
3708 * CUCv2: If a different dest is picked for
3709 * the retransmission, then new
3710 * (rtx-)pseudo_cumack needs to be tracked
3711 * for orig dest. Let CUCv2 track new (rtx-)
3712 * pseudo-cumack always.
3715 tp1->whoTo->find_pseudo_cumack = 1;
3716 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3719 } else { /* CMT is OFF */
3721 #ifdef SCTP_FR_TO_ALTERNATE
3722 /* Can we find an alternate? */
3723 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3726 * default behavior is to NOT retransmit
3727 * FR's to an alternate. Armando Caro's
3728 * paper details why.
3734 tp1->rec.data.doing_fast_retransmit = 1;
3736 /* mark the sending seq for possible subsequent FR's */
3738 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3739 * (uint32_t)tpi->rec.data.tsn);
3741 if (TAILQ_EMPTY(&asoc->send_queue)) {
3743 * If the queue of send is empty then its
3744 * the next sequence number that will be
3745 * assigned so we subtract one from this to
3746 * get the one we last sent.
3748 tp1->rec.data.fast_retran_tsn = sending_seq;
3751 * If there are chunks on the send queue
3752 * (unsent data that has made it from the
3753 * stream queues but not out the door, we
3754 * take the first one (which will have the
3755 * lowest TSN) and subtract one to get the
3758 struct sctp_tmit_chunk *ttt;
3760 ttt = TAILQ_FIRST(&asoc->send_queue);
3761 tp1->rec.data.fast_retran_tsn =
3767 * this guy had a RTO calculation pending on
3770 if ((tp1->whoTo != NULL) &&
3771 (tp1->whoTo->rto_needed == 0)) {
3772 tp1->whoTo->rto_needed = 1;
3776 if (alt != tp1->whoTo) {
3777 /* yes, there is an alternate. */
3778 sctp_free_remote_addr(tp1->whoTo);
3779 /* sa_ignore FREED_MEMORY */
3781 atomic_add_int(&alt->ref_count, 1);
/*
 * Walk the sent queue and try to advance the PR-SCTP "Advanced Peer Ack
 * Point" (RFC 3758) past chunks that are abandoned (FORWARD_TSN_SKIP /
 * NR_ACKED) or whose PR-SCTP lifetime has expired.  Visible behavior:
 * stops at the first chunk that is neither skippable, RESEND, nor
 * NR_ACKED; refuses to advance past reliable (non-PR) chunks; drops
 * TTL-expired RESEND chunks via sctp_release_pr_sctp_chunk().
 *
 * NOTE(review): this extraction elided many original lines (the embedded
 * line numbers jump), so return-value semantics and loop exits are only
 * partially visible here — presumably it returns the last chunk whose
 * TSN matched the advanced ack point (a_adv); confirm against the full
 * source before relying on this summary.
 */
3787 struct sctp_tmit_chunk *
3788 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3789 struct sctp_association *asoc)
3791 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3795 if (asoc->prsctp_supported == 0) {
3798 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3799 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3800 tp1->sent != SCTP_DATAGRAM_RESEND &&
3801 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3802 /* no chance to advance, out of here */
3805 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3806 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3807 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3808 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3809 asoc->advanced_peer_ack_point,
3810 tp1->rec.data.tsn, 0, 0);
3813 if (!PR_SCTP_ENABLED(tp1->flags)) {
3815 * We can't fwd-tsn past any that are reliable aka
3816 * retransmitted until the asoc fails.
/* Sample the clock once for the TTL-expiry comparisons below. */
3821 (void)SCTP_GETTIME_TIMEVAL(&now);
3825 * now we got a chunk which is marked for another
3826 * retransmission to a PR-stream but has run out its chances
3827 * already maybe OR has been marked to skip now. Can we skip
3828 * it if its a resend?
3830 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3831 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3833 * Now is this one marked for resend and its time is
3836 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3837 /* Yes so drop it */
3839 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3840 1, SCTP_SO_NOT_LOCKED);
3844 * No, we are done when hit one for resend
3845 * whos time as not expired.
3851 * Ok now if this chunk is marked to drop it we can clean up
3852 * the chunk, advance our peer ack point and we can check
3855 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3856 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3857 /* advance PeerAckPoint goes forward */
3858 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3859 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3861 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3862 /* No update but we do save the chk */
3867 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size accounting: classify every chunk
 * on the sent queue by its 'sent' state and compare against the cached
 * asoc->total_flight / total_flight_count captured on entry.
 *
 * Buckets (by chk->sent relative to the SCTP_DATAGRAM_* ordering):
 *   inflight  — sent  < RESEND (still counted as in flight)
 *   resend    — sent == RESEND
 *   inbetween — RESEND < sent < ACKED
 *   above     — sent  > ACKED (marked/skipped states)
 *   acked     — (bucket increments elided in this extraction)
 *
 * NOTE(review): lines were elided here (the embedded line numbers jump),
 * including the return statements and the INVARIANTS/#ifdef structure
 * around the panic() — presumably it returns nonzero when the audit
 * finds a mismatch (callers use "if (sctp_fs_audit(asoc))" to resync);
 * confirm against the full source.
 */
3877 sctp_fs_audit(struct sctp_association *asoc)
3879 struct sctp_tmit_chunk *chk;
3880 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3883 int entry_flight, entry_cnt;
/* Snapshot the counters we are auditing before walking the queue. */
3888 entry_flight = asoc->total_flight;
3889 entry_cnt = asoc->total_flight_count;
/* All-PR-SCTP queues are exempt from the audit. */
3891 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3894 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3895 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3896 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3901 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3903 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3905 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Mismatch: report (or panic under the elided debug build) with both
 * the entry snapshot and the recount. */
3912 if ((inflight > 0) || (inbetween > 0)) {
3914 panic("Flight size-express incorrect? \n");
3916 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3917 entry_flight, entry_cnt);
3919 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3920 inflight, inbetween, resend, above, acked);
/*
 * Undo a window probe for one chunk: clear its window_probe flag and,
 * if the chunk is still eligible (not yet ACKED and its data mbuf is
 * still attached), pull it out of flight and mark it for retransmission
 * (SCTP_DATAGRAM_RESEND), bumping the retran count.
 *
 * Chunks already at/above ACKED or with data == NULL are left alone
 * (only an optional flight log entry is emitted) — "TSN's skipped we do
 * NOT move back."
 *
 * NOTE(review): several lines are elided in this extraction (embedded
 * line numbers jump), including the early return after the skip branch
 * and some sctp_misc_ints() arguments; verify against the full source.
 */
3929 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3930 struct sctp_association *asoc,
3931 struct sctp_tmit_chunk *tp1)
3933 tp1->window_probe = 0;
3934 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3935 /* TSN's skipped we do NOT move back. */
3936 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3937 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3939 (uint32_t)(uintptr_t)tp1->whoTo,
3943 /* First setup this by shrinking flight */
/* Let the CC module observe the (temporary) acknowledgement first. */
3944 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3945 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3948 sctp_flight_size_decrease(tp1);
3949 sctp_total_flight_decrease(stcb, tp1);
3950 /* Now mark for resend */
3951 tp1->sent = SCTP_DATAGRAM_RESEND;
3952 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3955 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3956 tp1->whoTo->flight_size,
3958 (uint32_t)(uintptr_t)tp1->whoTo,
/*
 * Fast-path ("express") SACK handler: processes a SACK that carries only
 * a cumulative ack (no gap-ack blocks).  Responsibilities visible below:
 *  - sanity-check cumack against sending_seq (abort assoc on violation,
 *    SCTP_LOC_21);
 *  - free every sent-queue chunk with TSN <= cumack, updating flight
 *    size, per-net ack counters, RTO samples, CUCv2 pseudo-cumack flags,
 *    stream chunk counts and pending stream-reset triggers;
 *  - wake the sending socket, run the CC module's after-sack hook, and
 *    recompute the peer rwnd (with SWS avoidance);
 *  - recover window-probe chunks and restart/stop T3 timers per net;
 *  - resync flight counters via sctp_fs_audit() when accounting drifts;
 *  - drive SHUTDOWN / SHUTDOWN-ACK state transitions when queues drain
 *    (aborting with SCTP_LOC_24 on a partial user message);
 *  - advance the PR-SCTP Advanced Peer Ack Point and emit FORWARD-TSN.
 *
 * NOTE(review): this extraction elided many original lines (embedded
 * line numbers jump), so several closing braces, 'else' arms and the
 * hp_sack/again loop labels are not visible here.  The comments below
 * describe only what the visible statements do.
 */
3964 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3965 uint32_t rwnd, int *abort_now, int ecne_seen)
3967 struct sctp_nets *net;
3968 struct sctp_association *asoc;
3969 struct sctp_tmit_chunk *tp1, *tp2;
3971 int win_probe_recovery = 0;
3972 int win_probe_recovered = 0;
3973 int j, done_once = 0;
3977 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3978 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3979 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3981 SCTP_TCB_LOCK_ASSERT(stcb);
3982 #ifdef SCTP_ASOCLOG_OF_TSNS
3983 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3984 stcb->asoc.cumack_log_at++;
3985 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3986 stcb->asoc.cumack_log_at = 0;
3990 old_rwnd = asoc->peers_rwnd;
/* Old SACK (cumack behind what we already acked): elided early-out. */
3991 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3994 } else if (asoc->last_acked_seq == cumack) {
3995 /* Window update sack */
3996 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3997 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3998 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3999 /* SWS sender side engages */
4000 asoc->peers_rwnd = 0;
4002 if (asoc->peers_rwnd > old_rwnd) {
4008 /* First setup for CC stuff */
4009 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4010 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
4011 /* Drag along the window_tsn for cwr's */
4012 net->cwr_window_tsn = cumack;
4014 net->prev_cwnd = net->cwnd;
4019 * CMT: Reset CUC and Fast recovery algo variables before
4022 net->new_pseudo_cumack = 0;
4023 net->will_exit_fast_recovery = 0;
4024 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4025 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* send_s = one past the highest TSN we have ever put on the wire. */
4028 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4029 tp1 = TAILQ_LAST(&asoc->sent_queue,
4030 sctpchunk_listhead);
4031 send_s = tp1->rec.data.tsn + 1;
4033 send_s = asoc->sending_seq;
4035 if (SCTP_TSN_GE(cumack, send_s)) {
4036 struct mbuf *op_err;
4037 char msg[SCTP_DIAG_INFO_LEN];
/* Peer acked a TSN we never sent: protocol violation, abort assoc. */
4041 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4043 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4044 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4045 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4048 asoc->this_sack_highest_gap = cumack;
4049 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4050 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4051 stcb->asoc.overall_error_count,
4053 SCTP_FROM_SCTP_INDATA,
/* Any forward movement clears the association error counter. */
4056 stcb->asoc.overall_error_count = 0;
4057 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4058 /* process the new consecutive TSN first */
4059 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4060 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4061 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4062 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4064 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4066 * If it is less than ACKED, it is
4067 * now no-longer in flight. Higher
4068 * values may occur during marking
4070 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4071 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4072 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4073 tp1->whoTo->flight_size,
4075 (uint32_t)(uintptr_t)tp1->whoTo,
4078 sctp_flight_size_decrease(tp1);
4079 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4080 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4083 /* sa_ignore NO_NULL_CHK */
4084 sctp_total_flight_decrease(stcb, tp1);
4086 tp1->whoTo->net_ack += tp1->send_size;
4087 if (tp1->snd_count < 2) {
4089 * True non-retransmitted
4092 tp1->whoTo->net_ack2 +=
4095 /* update RTO too? */
4098 sctp_calculate_rto(stcb,
4101 &tp1->sent_rcv_time,
4102 SCTP_RTT_FROM_DATA)) {
4105 if (tp1->whoTo->rto_needed == 0) {
4106 tp1->whoTo->rto_needed = 1;
4112 * CMT: CUCv2 algorithm. From the
4113 * cumack'd TSNs, for each TSN being
4114 * acked for the first time, set the
4115 * following variables for the
4116 * corresp destination.
4117 * new_pseudo_cumack will trigger a
4119 * find_(rtx_)pseudo_cumack will
4120 * trigger search for the next
4121 * expected (rtx-)pseudo-cumack.
4123 tp1->whoTo->new_pseudo_cumack = 1;
4124 tp1->whoTo->find_pseudo_cumack = 1;
4125 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4127 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4128 /* sa_ignore NO_NULL_CHK */
4129 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4132 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4133 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4135 if (tp1->rec.data.chunk_was_revoked) {
4136 /* deflate the cwnd */
4137 tp1->whoTo->cwnd -= tp1->book_size;
4138 tp1->rec.data.chunk_was_revoked = 0;
/* Keep per-stream outstanding-chunk accounting in sync. */
4140 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4141 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4142 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4145 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4149 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4150 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4151 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4152 asoc->trigger_reset = 1;
4154 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4156 /* sa_ignore NO_NULL_CHK */
4157 sctp_free_bufspace(stcb, asoc, tp1, 1);
4158 sctp_m_freem(tp1->data);
4161 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4162 sctp_log_sack(asoc->last_acked_seq,
4167 SCTP_LOG_FREE_SENT);
4169 asoc->sent_queue_cnt--;
4170 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Space freed in so_snd: wake the sending application. */
4177 /* sa_ignore NO_NULL_CHK */
4178 if (stcb->sctp_socket) {
4179 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4183 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4185 /* sa_ignore NO_NULL_CHK */
4186 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4188 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4189 so = SCTP_INP_SO(stcb->sctp_ep);
4190 atomic_add_int(&stcb->asoc.refcnt, 1);
4191 SCTP_TCB_UNLOCK(stcb);
4192 SCTP_SOCKET_LOCK(so, 1);
4193 SCTP_TCB_LOCK(stcb);
4194 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4195 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4196 /* assoc was freed while we were unlocked */
4197 SCTP_SOCKET_UNLOCK(so, 1);
4201 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4202 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4203 SCTP_SOCKET_UNLOCK(so, 1);
4206 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4207 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4211 /* JRS - Use the congestion control given in the CC module */
4212 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4213 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4214 if (net->net_ack2 > 0) {
4216 * Karn's rule applies to clearing error
4217 * count, this is optional.
4219 net->error_count = 0;
4220 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4221 /* addr came good */
4222 net->dest_state |= SCTP_ADDR_REACHABLE;
4223 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4224 0, (void *)net, SCTP_SO_NOT_LOCKED);
4226 if (net == stcb->asoc.primary_destination) {
4227 if (stcb->asoc.alternate) {
4229 * release the alternate,
4232 sctp_free_remote_addr(stcb->asoc.alternate);
4233 stcb->asoc.alternate = NULL;
/* Potentially-failed (PF) destination proved alive: leave PF state. */
4236 if (net->dest_state & SCTP_ADDR_PF) {
4237 net->dest_state &= ~SCTP_ADDR_PF;
4238 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4239 stcb->sctp_ep, stcb, net,
4240 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4241 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4242 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4243 /* Done with this net */
4246 /* restore any doubled timers */
4247 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4248 if (net->RTO < stcb->asoc.minrto) {
4249 net->RTO = stcb->asoc.minrto;
4251 if (net->RTO > stcb->asoc.maxrto) {
4252 net->RTO = stcb->asoc.maxrto;
4256 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4258 asoc->last_acked_seq = cumack;
4260 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4261 /* nothing left in-flight */
4262 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4263 net->flight_size = 0;
4264 net->partial_bytes_acked = 0;
4266 asoc->total_flight = 0;
4267 asoc->total_flight_count = 0;
/* Recompute peer rwnd from the advertised window minus bytes in flight
 * (plus per-chunk overhead), with sender-side SWS avoidance. */
4271 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4272 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4273 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4274 /* SWS sender side engages */
4275 asoc->peers_rwnd = 0;
4277 if (asoc->peers_rwnd > old_rwnd) {
4278 win_probe_recovery = 1;
4280 /* Now assure a timer where data is queued at */
4283 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4284 if (win_probe_recovery && (net->window_probe)) {
4285 win_probe_recovered = 1;
4287 * Find first chunk that was used with window probe
4288 * and clear the sent
4290 /* sa_ignore FREED_MEMORY */
4291 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4292 if (tp1->window_probe) {
4293 /* move back to data send queue */
4294 sctp_window_probe_recovery(stcb, asoc, tp1);
4299 if (net->flight_size) {
4301 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4302 if (net->window_probe) {
4303 net->window_probe = 0;
4306 if (net->window_probe) {
4308 * In window probes we must assure a timer
4309 * is still running there
4311 net->window_probe = 0;
4312 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4313 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4315 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4316 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4318 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight accounting drifted with data still queued: audit and, if the
 * audit confirms, rebuild flight counters from the sent queue. */
4323 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4324 (asoc->sent_queue_retran_cnt == 0) &&
4325 (win_probe_recovered == 0) &&
4328 * huh, this should not happen unless all packets are
4329 * PR-SCTP and marked to skip of course.
4331 if (sctp_fs_audit(asoc)) {
4332 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4333 net->flight_size = 0;
4335 asoc->total_flight = 0;
4336 asoc->total_flight_count = 0;
4337 asoc->sent_queue_retran_cnt = 0;
4338 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4339 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4340 sctp_flight_size_increase(tp1);
4341 sctp_total_flight_increase(stcb, tp1);
4342 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4343 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4350 /**********************************/
4351 /* Now what about shutdown issues */
4352 /**********************************/
4353 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4354 /* nothing left on sendqueue.. consider done */
4356 if ((asoc->stream_queue_cnt == 1) &&
4357 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4358 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4359 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4360 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4362 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4363 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4364 (asoc->stream_queue_cnt == 1) &&
4365 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4366 struct mbuf *op_err;
/* A partial user message cannot be completed: abort the association. */
4370 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4371 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4372 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4375 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4376 (asoc->stream_queue_cnt == 0)) {
4377 struct sctp_nets *netp;
4379 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4380 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4381 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4383 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4384 sctp_stop_timers_for_shutdown(stcb);
4385 if (asoc->alternate) {
4386 netp = asoc->alternate;
4388 netp = asoc->primary_destination;
4390 sctp_send_shutdown(stcb, netp);
4391 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4392 stcb->sctp_ep, stcb, netp);
4393 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4394 stcb->sctp_ep, stcb, netp);
4395 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4396 (asoc->stream_queue_cnt == 0)) {
4397 struct sctp_nets *netp;
4399 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4400 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4401 sctp_stop_timers_for_shutdown(stcb);
4402 if (asoc->alternate) {
4403 netp = asoc->alternate;
4405 netp = asoc->primary_destination;
4407 sctp_send_shutdown_ack(stcb, netp);
4408 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4409 stcb->sctp_ep, stcb, netp);
4412 /*********************************************/
4413 /* Here we perform PR-SCTP procedures */
4415 /*********************************************/
4416 /* C1. update advancedPeerAckPoint */
4417 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4418 asoc->advanced_peer_ack_point = cumack;
4420 /* PR-Sctp issues need to be addressed too */
4421 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4422 struct sctp_tmit_chunk *lchk;
4423 uint32_t old_adv_peer_ack_point;
4425 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4426 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4427 /* C3. See if we need to send a Fwd-TSN */
4428 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4430 * ISSUE with ECN, see FWD-TSN processing.
4432 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4433 send_forward_tsn(stcb, asoc);
4435 /* try to FR fwd-tsn's that get lost too */
4436 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4437 send_forward_tsn(stcb, asoc);
4442 /* Assure a timer is up */
4443 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4444 stcb->sctp_ep, stcb, lchk->whoTo);
4447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4448 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4450 stcb->asoc.peers_rwnd,
4451 stcb->asoc.total_flight,
4452 stcb->asoc.total_output_queue_size);
4457 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4458 struct sctp_tcb *stcb,
4459 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4460 int *abort_now, uint8_t flags,
4461 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4463 struct sctp_association *asoc;
4464 struct sctp_tmit_chunk *tp1, *tp2;
4465 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4466 uint16_t wake_him = 0;
4467 uint32_t send_s = 0;
4469 int accum_moved = 0;
4470 int will_exit_fast_recovery = 0;
4471 uint32_t a_rwnd, old_rwnd;
4472 int win_probe_recovery = 0;
4473 int win_probe_recovered = 0;
4474 struct sctp_nets *net = NULL;
4477 uint8_t reneged_all = 0;
4478 uint8_t cmt_dac_flag;
4481 * we take any chance we can to service our queues since we cannot
4482 * get awoken when the socket is read from :<
4485 * Now perform the actual SACK handling: 1) Verify that it is not an
4486 * old sack, if so discard. 2) If there is nothing left in the send
4487 * queue (cum-ack is equal to last acked) then you have a duplicate
4488 * too, update any rwnd change and verify no timers are running.
4489 * then return. 3) Process any new consequtive data i.e. cum-ack
4490 * moved process these first and note that it moved. 4) Process any
4491 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4492 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4493 * sync up flightsizes and things, stop all timers and also check
4494 * for shutdown_pending state. If so then go ahead and send off the
4495 * shutdown. If in shutdown recv, send off the shutdown-ack and
4496 * start that timer, Ret. 9) Strike any non-acked things and do FR
4497 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4498 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4499 * if in shutdown_recv state.
4501 SCTP_TCB_LOCK_ASSERT(stcb);
4503 this_sack_lowest_newack = 0;
4504 SCTP_STAT_INCR(sctps_slowpath_sack);
4506 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4507 #ifdef SCTP_ASOCLOG_OF_TSNS
4508 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4509 stcb->asoc.cumack_log_at++;
4510 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4511 stcb->asoc.cumack_log_at = 0;
4516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4517 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4518 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4521 old_rwnd = stcb->asoc.peers_rwnd;
4522 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4523 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4524 stcb->asoc.overall_error_count,
4526 SCTP_FROM_SCTP_INDATA,
4529 stcb->asoc.overall_error_count = 0;
4531 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4532 sctp_log_sack(asoc->last_acked_seq,
4539 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4541 uint32_t *dupdata, dblock;
4543 for (i = 0; i < num_dup; i++) {
4544 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4545 sizeof(uint32_t), (uint8_t *)&dblock);
4546 if (dupdata == NULL) {
4549 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4553 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4554 tp1 = TAILQ_LAST(&asoc->sent_queue,
4555 sctpchunk_listhead);
4556 send_s = tp1->rec.data.tsn + 1;
4559 send_s = asoc->sending_seq;
4561 if (SCTP_TSN_GE(cum_ack, send_s)) {
4562 struct mbuf *op_err;
4563 char msg[SCTP_DIAG_INFO_LEN];
4566 * no way, we have not even sent this TSN out yet. Peer is
4567 * hopelessly messed up with us.
4569 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4572 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4573 tp1->rec.data.tsn, (void *)tp1);
4578 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4580 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4581 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4582 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4585 /**********************/
4586 /* 1) check the range */
4587 /**********************/
4588 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4589 /* acking something behind */
4593 /* update the Rwnd of the peer */
4594 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4595 TAILQ_EMPTY(&asoc->send_queue) &&
4596 (asoc->stream_queue_cnt == 0)) {
4597 /* nothing left on send/sent and strmq */
4598 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4599 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4600 asoc->peers_rwnd, 0, 0, a_rwnd);
4602 asoc->peers_rwnd = a_rwnd;
4603 if (asoc->sent_queue_retran_cnt) {
4604 asoc->sent_queue_retran_cnt = 0;
4606 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4607 /* SWS sender side engages */
4608 asoc->peers_rwnd = 0;
4610 /* stop any timers */
4611 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4613 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4614 net->partial_bytes_acked = 0;
4615 net->flight_size = 0;
4617 asoc->total_flight = 0;
4618 asoc->total_flight_count = 0;
4622 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4623 * things. The total byte count acked is tracked in netAckSz AND
4624 * netAck2 is used to track the total bytes acked that are un-
4625 * amibguious and were never retransmitted. We track these on a per
4626 * destination address basis.
4628 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4629 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4630 /* Drag along the window_tsn for cwr's */
4631 net->cwr_window_tsn = cum_ack;
4633 net->prev_cwnd = net->cwnd;
4638 * CMT: Reset CUC and Fast recovery algo variables before
4641 net->new_pseudo_cumack = 0;
4642 net->will_exit_fast_recovery = 0;
4643 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4644 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4648 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4649 * to be greater than the cumack. Also reset saw_newack to 0
4652 net->saw_newack = 0;
4653 net->this_sack_highest_newack = last_tsn;
4655 /* process the new consecutive TSN first */
4656 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4657 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4658 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4660 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4662 * If it is less than ACKED, it is
4663 * now no-longer in flight. Higher
4664 * values may occur during marking
4666 if ((tp1->whoTo->dest_state &
4667 SCTP_ADDR_UNCONFIRMED) &&
4668 (tp1->snd_count < 2)) {
4670 * If there was no retran
4671 * and the address is
4672 * un-confirmed and we sent
4674 * sacked.. its confirmed,
4677 tp1->whoTo->dest_state &=
4678 ~SCTP_ADDR_UNCONFIRMED;
4680 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4681 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4682 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4683 tp1->whoTo->flight_size,
4685 (uint32_t)(uintptr_t)tp1->whoTo,
4688 sctp_flight_size_decrease(tp1);
4689 sctp_total_flight_decrease(stcb, tp1);
4690 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4691 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4695 tp1->whoTo->net_ack += tp1->send_size;
4697 /* CMT SFR and DAC algos */
4698 this_sack_lowest_newack = tp1->rec.data.tsn;
4699 tp1->whoTo->saw_newack = 1;
4701 if (tp1->snd_count < 2) {
4703 * True non-retransmitted
4706 tp1->whoTo->net_ack2 +=
4709 /* update RTO too? */
4712 sctp_calculate_rto(stcb,
4715 &tp1->sent_rcv_time,
4716 SCTP_RTT_FROM_DATA)) {
4719 if (tp1->whoTo->rto_needed == 0) {
4720 tp1->whoTo->rto_needed = 1;
4726 * CMT: CUCv2 algorithm. From the
4727 * cumack'd TSNs, for each TSN being
4728 * acked for the first time, set the
4729 * following variables for the
4730 * corresp destination.
4731 * new_pseudo_cumack will trigger a
4733 * find_(rtx_)pseudo_cumack will
4734 * trigger search for the next
4735 * expected (rtx-)pseudo-cumack.
4737 tp1->whoTo->new_pseudo_cumack = 1;
4738 tp1->whoTo->find_pseudo_cumack = 1;
4739 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4742 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4743 sctp_log_sack(asoc->last_acked_seq,
4748 SCTP_LOG_TSN_ACKED);
4750 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4751 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4754 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4755 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4756 #ifdef SCTP_AUDITING_ENABLED
4757 sctp_audit_log(0xB3,
4758 (asoc->sent_queue_retran_cnt & 0x000000ff));
4761 if (tp1->rec.data.chunk_was_revoked) {
4762 /* deflate the cwnd */
4763 tp1->whoTo->cwnd -= tp1->book_size;
4764 tp1->rec.data.chunk_was_revoked = 0;
4766 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4767 tp1->sent = SCTP_DATAGRAM_ACKED;
4774 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4775 /* always set this up to cum-ack */
4776 asoc->this_sack_highest_gap = last_tsn;
4778 if ((num_seg > 0) || (num_nr_seg > 0)) {
4781 * thisSackHighestGap will increase while handling NEW
4782 * segments this_sack_highest_newack will increase while
4783 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4784 * used for CMT DAC algo. saw_newack will also change.
4786 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4787 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4788 num_seg, num_nr_seg, &rto_ok)) {
4792 * validate the biggest_tsn_acked in the gap acks if strict
4793 * adherence is wanted.
4795 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4797 * peer is either confused or we are under attack.
4800 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4801 biggest_tsn_acked, send_s);
4805 /*******************************************/
4806 /* cancel ALL T3-send timer if accum moved */
4807 /*******************************************/
4808 if (asoc->sctp_cmt_on_off > 0) {
4809 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4810 if (net->new_pseudo_cumack)
4811 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4813 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4818 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4819 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4820 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4824 /********************************************/
4825 /* drop the acked chunks from the sentqueue */
4826 /********************************************/
4827 asoc->last_acked_seq = cum_ack;
4829 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4830 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4833 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4834 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4835 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4838 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4842 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4843 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4844 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4845 asoc->trigger_reset = 1;
4847 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4848 if (PR_SCTP_ENABLED(tp1->flags)) {
4849 if (asoc->pr_sctp_cnt != 0)
4850 asoc->pr_sctp_cnt--;
4852 asoc->sent_queue_cnt--;
4854 /* sa_ignore NO_NULL_CHK */
4855 sctp_free_bufspace(stcb, asoc, tp1, 1);
4856 sctp_m_freem(tp1->data);
4858 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4859 asoc->sent_queue_cnt_removeable--;
4862 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4863 sctp_log_sack(asoc->last_acked_seq,
4868 SCTP_LOG_FREE_SENT);
4870 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4873 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4875 panic("Warning flight size is positive and should be 0");
4877 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4878 asoc->total_flight);
4880 asoc->total_flight = 0;
4883 /* sa_ignore NO_NULL_CHK */
4884 if ((wake_him) && (stcb->sctp_socket)) {
4885 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4889 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4890 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4891 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4894 so = SCTP_INP_SO(stcb->sctp_ep);
4895 atomic_add_int(&stcb->asoc.refcnt, 1);
4896 SCTP_TCB_UNLOCK(stcb);
4897 SCTP_SOCKET_LOCK(so, 1);
4898 SCTP_TCB_LOCK(stcb);
4899 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4900 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4901 /* assoc was freed while we were unlocked */
4902 SCTP_SOCKET_UNLOCK(so, 1);
4906 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4907 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4908 SCTP_SOCKET_UNLOCK(so, 1);
4911 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4912 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4916 if (asoc->fast_retran_loss_recovery && accum_moved) {
4917 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4918 /* Setup so we will exit RFC2582 fast recovery */
4919 will_exit_fast_recovery = 1;
4923 * Check for revoked fragments:
4925 * if Previous sack - Had no frags then we can't have any revoked if
4926 * Previous sack - Had frag's then - If we now have frags aka
4927 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4928 * some of them. else - The peer revoked all ACKED fragments, since
4929 * we had some before and now we have NONE.
4933 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4934 asoc->saw_sack_with_frags = 1;
4935 } else if (asoc->saw_sack_with_frags) {
4936 int cnt_revoked = 0;
4938 /* Peer revoked all dg's marked or acked */
4939 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4940 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4941 tp1->sent = SCTP_DATAGRAM_SENT;
4942 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4943 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4944 tp1->whoTo->flight_size,
4946 (uint32_t)(uintptr_t)tp1->whoTo,
4949 sctp_flight_size_increase(tp1);
4950 sctp_total_flight_increase(stcb, tp1);
4951 tp1->rec.data.chunk_was_revoked = 1;
4953 * To ensure that this increase in
4954 * flightsize, which is artificial, does not
4955 * throttle the sender, we also increase the
4956 * cwnd artificially.
4958 tp1->whoTo->cwnd += tp1->book_size;
4965 asoc->saw_sack_with_frags = 0;
4968 asoc->saw_sack_with_nr_frags = 1;
4970 asoc->saw_sack_with_nr_frags = 0;
4972 /* JRS - Use the congestion control given in the CC module */
4973 if (ecne_seen == 0) {
4974 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4975 if (net->net_ack2 > 0) {
4977 * Karn's rule applies to clearing error
4978 * count, this is optional.
4980 net->error_count = 0;
4981 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4982 /* addr came good */
4983 net->dest_state |= SCTP_ADDR_REACHABLE;
4984 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4985 0, (void *)net, SCTP_SO_NOT_LOCKED);
4988 if (net == stcb->asoc.primary_destination) {
4989 if (stcb->asoc.alternate) {
4991 * release the alternate,
4994 sctp_free_remote_addr(stcb->asoc.alternate);
4995 stcb->asoc.alternate = NULL;
4999 if (net->dest_state & SCTP_ADDR_PF) {
5000 net->dest_state &= ~SCTP_ADDR_PF;
5001 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5002 stcb->sctp_ep, stcb, net,
5003 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5004 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5005 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5006 /* Done with this net */
5009 /* restore any doubled timers */
5010 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5011 if (net->RTO < stcb->asoc.minrto) {
5012 net->RTO = stcb->asoc.minrto;
5014 if (net->RTO > stcb->asoc.maxrto) {
5015 net->RTO = stcb->asoc.maxrto;
5019 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5022 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5023 /* nothing left in-flight */
5024 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5025 /* stop all timers */
5026 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5028 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5029 net->flight_size = 0;
5030 net->partial_bytes_acked = 0;
5032 asoc->total_flight = 0;
5033 asoc->total_flight_count = 0;
5036 /**********************************/
5037 /* Now what about shutdown issues */
5038 /**********************************/
5039 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5040 /* nothing left on sendqueue.. consider done */
5041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5042 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5043 asoc->peers_rwnd, 0, 0, a_rwnd);
5045 asoc->peers_rwnd = a_rwnd;
5046 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5047 /* SWS sender side engages */
5048 asoc->peers_rwnd = 0;
5051 if ((asoc->stream_queue_cnt == 1) &&
5052 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5053 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5054 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5055 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5057 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5058 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5059 (asoc->stream_queue_cnt == 1) &&
5060 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5061 struct mbuf *op_err;
5065 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5066 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5067 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5070 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5071 (asoc->stream_queue_cnt == 0)) {
5072 struct sctp_nets *netp;
5074 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5075 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5076 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5078 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5079 sctp_stop_timers_for_shutdown(stcb);
5080 if (asoc->alternate) {
5081 netp = asoc->alternate;
5083 netp = asoc->primary_destination;
5085 sctp_send_shutdown(stcb, netp);
5086 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5087 stcb->sctp_ep, stcb, netp);
5088 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5089 stcb->sctp_ep, stcb, netp);
5091 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5092 (asoc->stream_queue_cnt == 0)) {
5093 struct sctp_nets *netp;
5095 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5096 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5097 sctp_stop_timers_for_shutdown(stcb);
5098 if (asoc->alternate) {
5099 netp = asoc->alternate;
5101 netp = asoc->primary_destination;
5103 sctp_send_shutdown_ack(stcb, netp);
5104 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5105 stcb->sctp_ep, stcb, netp);
5110 * Now here we are going to recycle net_ack for a different use...
5113 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5118 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5119 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5120 * automatically ensure that.
5122 if ((asoc->sctp_cmt_on_off > 0) &&
5123 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5124 (cmt_dac_flag == 0)) {
5125 this_sack_lowest_newack = cum_ack;
5127 if ((num_seg > 0) || (num_nr_seg > 0)) {
5128 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5129 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5131 /* JRS - Use the congestion control given in the CC module */
5132 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5134 /* Now are we exiting loss recovery ? */
5135 if (will_exit_fast_recovery) {
5136 /* Ok, we must exit fast recovery */
5137 asoc->fast_retran_loss_recovery = 0;
5139 if ((asoc->sat_t3_loss_recovery) &&
5140 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5141 /* end satellite t3 loss recovery */
5142 asoc->sat_t3_loss_recovery = 0;
5147 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5148 if (net->will_exit_fast_recovery) {
5149 /* Ok, we must exit fast recovery */
5150 net->fast_retran_loss_recovery = 0;
5154 /* Adjust and set the new rwnd value */
5155 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5156 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5157 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5159 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5160 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5161 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5162 /* SWS sender side engages */
5163 asoc->peers_rwnd = 0;
5165 if (asoc->peers_rwnd > old_rwnd) {
5166 win_probe_recovery = 1;
5170 * Now we must setup so we have a timer up for anyone with
5176 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5177 if (win_probe_recovery && (net->window_probe)) {
5178 win_probe_recovered = 1;
5180 * Find first chunk that was used with
5181 * window probe and clear the event. Put
5182 * it back into the send queue as if has
5185 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5186 if (tp1->window_probe) {
5187 sctp_window_probe_recovery(stcb, asoc, tp1);
5192 if (net->flight_size) {
5194 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5195 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5196 stcb->sctp_ep, stcb, net);
5198 if (net->window_probe) {
5199 net->window_probe = 0;
5202 if (net->window_probe) {
5204 * In window probes we must assure a timer
5205 * is still running there
5207 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5208 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5209 stcb->sctp_ep, stcb, net);
5212 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5213 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5215 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5220 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5221 (asoc->sent_queue_retran_cnt == 0) &&
5222 (win_probe_recovered == 0) &&
5225 * huh, this should not happen unless all packets are
5226 * PR-SCTP and marked to skip of course.
5228 if (sctp_fs_audit(asoc)) {
5229 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5230 net->flight_size = 0;
5232 asoc->total_flight = 0;
5233 asoc->total_flight_count = 0;
5234 asoc->sent_queue_retran_cnt = 0;
5235 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5236 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5237 sctp_flight_size_increase(tp1);
5238 sctp_total_flight_increase(stcb, tp1);
5239 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5240 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5247 /*********************************************/
5248 /* Here we perform PR-SCTP procedures */
5250 /*********************************************/
5251 /* C1. update advancedPeerAckPoint */
5252 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5253 asoc->advanced_peer_ack_point = cum_ack;
5255 /* C2. try to further move advancedPeerAckPoint ahead */
5256 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5257 struct sctp_tmit_chunk *lchk;
5258 uint32_t old_adv_peer_ack_point;
5260 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5261 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5262 /* C3. See if we need to send a Fwd-TSN */
5263 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5265 * ISSUE with ECN, see FWD-TSN processing.
5267 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5268 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5269 0xee, cum_ack, asoc->advanced_peer_ack_point,
5270 old_adv_peer_ack_point);
5272 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5273 send_forward_tsn(stcb, asoc);
5275 /* try to FR fwd-tsn's that get lost too */
5276 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5277 send_forward_tsn(stcb, asoc);
5282 /* Assure a timer is up */
5283 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5284 stcb->sctp_ep, stcb, lchk->whoTo);
5287 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5288 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5290 stcb->asoc.peers_rwnd,
5291 stcb->asoc.total_flight,
5292 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a received SHUTDOWN chunk.
 * A SHUTDOWN carries only a cumulative ack (no gap reports, no window
 * update), so we synthesize the missing pieces and hand the result to
 * the express SACK path.
 *
 * stcb       - association control block.
 * cp         - received SHUTDOWN chunk; cumulative_tsn_ack is in
 *              network byte order.
 * abort_flag - set by the express handler if the association was
 *              aborted while processing.
 */
5297 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5300 uint32_t cum_ack, a_rwnd;
5302 cum_ack = ntohl(cp->cumulative_tsn_ack);
5303 /* Arrange so a_rwnd does NOT change */
/*
 * Adding total_flight back compensates for the in-flight bytes the
 * SACK path subtracts when it recomputes peers_rwnd, so the peer's
 * advertised window effectively stays put.
 */
5304 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5306 /* Now call the express sack handling */
/* Trailing 0: presumably "no ECN-echo seen" — TODO confirm parameter. */
5307 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * After PR-SCTP (FWD-TSN) processing has advanced an inbound stream's
 * delivery point, push out whatever has become deliverable on that
 * stream:
 *   pass 1: deliver/flush every queued message whose MID is at or
 *           before the new last_mid_delivered;
 *   pass 2: resume normal in-order delivery for the consecutive MIDs
 *           that follow.
 * The readq/reassembly helpers are invoked with SCTP_READ_LOCK_HELD,
 * so the caller is expected to hold the INP read lock.
 */
5311 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5312 struct sctp_stream_in *strmin)
5314 struct sctp_queued_to_read *control, *ncontrol;
5315 struct sctp_association *asoc;
5317 int need_reasm_check = 0;
5320 mid = strmin->last_mid_delivered;
5322 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with MID <= last_mid_delivered is now deliverable. */
5325 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5326 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5327 /* this is deliverable now */
5328 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unlink from whichever stream queue (ordered/unordered) holds it. */
5329 if (control->on_strm_q) {
5330 if (control->on_strm_q == SCTP_ON_ORDERED) {
5331 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5332 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5333 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5336 panic("strmin: %p ctl: %p unknown %d",
5337 strmin, control, control->on_strm_q);
5340 control->on_strm_q = 0;
5342 /* subtract pending on streams */
/* Clamp at zero rather than underflowing the byte count. */
5343 if (asoc->size_on_all_streams >= control->length) {
5344 asoc->size_on_all_streams -= control->length;
5347 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5349 asoc->size_on_all_streams = 0;
5352 sctp_ucount_decr(asoc->cnt_on_all_streams);
5353 /* deliver it to at least the delivery-q */
5354 if (stcb->sctp_socket) {
5355 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5356 sctp_add_to_readq(stcb->sctp_ep, stcb,
5358 &stcb->sctp_socket->so_rcv,
5359 1, SCTP_READ_LOCK_HELD,
5360 SCTP_SO_NOT_LOCKED);
5363 /* Its a fragmented message */
5364 if (control->first_frag_seen) {
5366 * Make it so this is next to
5367 * deliver, we restore later
5369 strmin->last_mid_delivered = control->mid - 1;
5370 need_reasm_check = 1;
5375 /* no more delivery now. */
/* A partially reassembled message may now be deliverable. */
5379 if (need_reasm_check) {
5382 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5383 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5384 /* Restore the next to deliver unless we are ahead */
5385 strmin->last_mid_delivered = mid;
5388 /* Left the front Partial one on */
5391 need_reasm_check = 0;
5394 * now we must deliver things in queue the normal way if any are
/* Pass 2: deliver strictly consecutive MIDs beyond the new point. */
5397 mid = strmin->last_mid_delivered + 1;
5398 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5399 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5400 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5401 /* this is deliverable now */
5402 if (control->on_strm_q) {
5403 if (control->on_strm_q == SCTP_ON_ORDERED) {
5404 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5405 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5406 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5409 panic("strmin: %p ctl: %p unknown %d",
5410 strmin, control, control->on_strm_q);
5413 control->on_strm_q = 0;
5415 /* subtract pending on streams */
5416 if (asoc->size_on_all_streams >= control->length) {
5417 asoc->size_on_all_streams -= control->length;
5420 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5422 asoc->size_on_all_streams = 0;
5425 sctp_ucount_decr(asoc->cnt_on_all_streams);
5426 /* deliver it to at least the delivery-q */
5427 strmin->last_mid_delivered = control->mid;
5428 if (stcb->sctp_socket) {
5429 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5430 sctp_add_to_readq(stcb->sctp_ep, stcb,
5432 &stcb->sctp_socket->so_rcv, 1,
5433 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance to the next expected MID and keep walking the queue. */
5436 mid = strmin->last_mid_delivered + 1;
5438 /* Its a fragmented message */
5439 if (control->first_frag_seen) {
5441 * Make it so this is next to
5444 strmin->last_mid_delivered = control->mid - 1;
5445 need_reasm_check = 1;
/* Final sweep: hand anything newly completed to the read queue. */
5453 if (need_reasm_check) {
5454 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * Toss the partially reassembled message for (stream, mid) that the
 * peer has forward-TSN'd past.  Chunks on the control's reassembly
 * list are freed; for pre-I-DATA unordered data only chunks with
 * TSN <= cumtsn are purged.  If the control is not already handed to
 * the socket read queue it is unlinked from its stream queue and
 * freed as well.
 *
 * stcb/asoc - association being processed.
 * stream    - inbound stream id.
 * mid       - message id (the SSN when I-DATA is not in use).
 * ordered   - nonzero if the message was sent ordered.
 * cumtsn    - new cumulative TSN taken from the FWD-TSN chunk.
 */
5461 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5462 struct sctp_association *asoc,
5463 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5465 struct sctp_queued_to_read *control;
5466 struct sctp_stream_in *strm;
5467 struct sctp_tmit_chunk *chk, *nchk;
5468 int cnt_removed = 0;
5471 * For now large messages held on the stream reasm that are complete
5472 * will be tossed too. We could in theory do more work to spin
5473 * through and stop after dumping one msg aka seeing the start of a
5474 * new msg at the head, and call the delivery function... to see if
5475 * it can be delivered... But for now we just dump everything on the
5478 strm = &asoc->strmin[stream];
/* Nothing queued for this (stream, mid)?  Then there is nothing to flush. */
5479 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5480 if (control == NULL) {
/* Old-style unordered data already past cumtsn needs no purging. */
5484 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5487 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5488 /* Purge hanging chunks */
/* Pre-I-DATA unordered: only toss chunks at or below the new cum TSN. */
5489 if (!asoc->idata_supported && (ordered == 0)) {
5490 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5495 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* Keep the reassembly byte count from underflowing. */
5496 if (asoc->size_on_reasm_queue >= chk->send_size) {
5497 asoc->size_on_reasm_queue -= chk->send_size;
5500 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5502 asoc->size_on_reasm_queue = 0;
5505 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5507 sctp_m_freem(chk->data);
5510 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Chunks survived the purge: rebuild delivery state around them. */
5512 if (!TAILQ_EMPTY(&control->reasm)) {
5513 /* This has to be old data, unordered */
5514 if (control->data) {
5515 sctp_m_freem(control->data);
5516 control->data = NULL;
5518 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5519 chk = TAILQ_FIRST(&control->reasm);
5520 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5521 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5522 sctp_add_chk_to_control(control, strm, stcb, asoc,
5523 chk, SCTP_READ_LOCK_HELD);
5525 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Otherwise unlink the now-empty control from its stream queue. */
5528 if (control->on_strm_q == SCTP_ON_ORDERED) {
5529 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5530 if (asoc->size_on_all_streams >= control->length) {
5531 asoc->size_on_all_streams -= control->length;
5534 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5536 asoc->size_on_all_streams = 0;
5539 sctp_ucount_decr(asoc->cnt_on_all_streams);
5540 control->on_strm_q = 0;
5541 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5542 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5543 control->on_strm_q = 0;
5545 } else if (control->on_strm_q) {
5546 panic("strm: %p ctl: %p unknown %d",
5547 strm, control, control->on_strm_q);
5550 control->on_strm_q = 0;
/* Only free it if the socket read queue does not still reference it. */
5551 if (control->on_read_q == 0) {
5552 sctp_free_remote_addr(control->whoFrom);
5553 if (control->data) {
5554 sctp_m_freem(control->data);
5555 control->data = NULL;
5557 sctp_free_a_readq(stcb, control);
5562 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5563 struct sctp_forward_tsn_chunk *fwd,
5564 int *abort_flag, struct mbuf *m, int offset)
5566 /* The pr-sctp fwd tsn */
5568 * here we will perform all the data receiver side steps for
5569 * processing FwdTSN, as required in by pr-sctp draft:
5571 * Assume we get FwdTSN(x):
5573 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5574 * + others we have 3) examine and update re-ordering queue on
5575 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5576 * report where we are.
5578 struct sctp_association *asoc;
5579 uint32_t new_cum_tsn, gap;
5580 unsigned int i, fwd_sz, m_size;
5582 struct sctp_stream_in *strm;
5583 struct sctp_queued_to_read *control, *sv;
5586 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5587 SCTPDBG(SCTP_DEBUG_INDATA1,
5588 "Bad size too small/big fwd-tsn\n");
5591 m_size = (stcb->asoc.mapping_array_size << 3);
5592 /*************************************************************/
5593 /* 1. Here we update local cumTSN and shift the bitmap array */
5594 /*************************************************************/
5595 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5597 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5598 /* Already got there ... */
5602 * now we know the new TSN is more advanced, let's find the actual
5605 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5606 asoc->cumulative_tsn = new_cum_tsn;
5607 if (gap >= m_size) {
5608 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5609 struct mbuf *op_err;
5610 char msg[SCTP_DIAG_INFO_LEN];
5613 * out of range (of single byte chunks in the rwnd I
5614 * give out). This must be an attacker.
5617 snprintf(msg, sizeof(msg),
5618 "New cum ack %8.8x too high, highest TSN %8.8x",
5619 new_cum_tsn, asoc->highest_tsn_inside_map);
5620 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5621 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5622 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5625 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5627 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5628 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5629 asoc->highest_tsn_inside_map = new_cum_tsn;
5631 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5632 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5634 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5635 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5638 SCTP_TCB_LOCK_ASSERT(stcb);
5639 for (i = 0; i <= gap; i++) {
5640 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5641 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5642 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5643 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5644 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5649 /*************************************************************/
5650 /* 2. Clear up re-assembly queue */
5651 /*************************************************************/
5653 /* This is now done as part of clearing up the stream/seq */
5654 if (asoc->idata_supported == 0) {
5657 /* Flush all the un-ordered data based on cum-tsn */
5658 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5659 for (sid = 0; sid < asoc->streamincnt; sid++) {
5660 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5662 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5664 /*******************************************************/
5665 /* 3. Update the PR-stream re-ordering queues and fix */
5666 /* delivery issues as needed. */
5667 /*******************************************************/
5668 fwd_sz -= sizeof(*fwd);
5671 unsigned int num_str;
5672 uint32_t mid, cur_mid;
5674 uint16_t ordered, flags;
5675 struct sctp_strseq *stseq, strseqbuf;
5676 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5678 offset += sizeof(*fwd);
5680 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5681 if (asoc->idata_supported) {
5682 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5684 num_str = fwd_sz / sizeof(struct sctp_strseq);
5686 for (i = 0; i < num_str; i++) {
5687 if (asoc->idata_supported) {
5688 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5689 sizeof(struct sctp_strseq_mid),
5690 (uint8_t *)&strseqbuf_m);
5691 offset += sizeof(struct sctp_strseq_mid);
5692 if (stseq_m == NULL) {
5695 sid = ntohs(stseq_m->sid);
5696 mid = ntohl(stseq_m->mid);
5697 flags = ntohs(stseq_m->flags);
5698 if (flags & PR_SCTP_UNORDERED_FLAG) {
5704 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5705 sizeof(struct sctp_strseq),
5706 (uint8_t *)&strseqbuf);
5707 offset += sizeof(struct sctp_strseq);
5708 if (stseq == NULL) {
5711 sid = ntohs(stseq->sid);
5712 mid = (uint32_t)ntohs(stseq->ssn);
5720 * Ok we now look for the stream/seq on the read
5721 * queue where its not all delivered. If we find it
5722 * we transmute the read entry into a PDI_ABORTED.
5724 if (sid >= asoc->streamincnt) {
5725 /* screwed up streams, stop! */
5728 if ((asoc->str_of_pdapi == sid) &&
5729 (asoc->ssn_of_pdapi == mid)) {
5731 * If this is the one we were partially
5732 * delivering now then we no longer are.
5733 * Note this will change with the reassembly
5736 asoc->fragmented_delivery_inprogress = 0;
5738 strm = &asoc->strmin[sid];
5739 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5740 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5742 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5743 if ((control->sinfo_stream == sid) &&
5744 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5745 str_seq = (sid << 16) | (0x0000ffff & mid);
5746 control->pdapi_aborted = 1;
5747 sv = stcb->asoc.control_pdapi;
5748 control->end_added = 1;
5749 if (control->on_strm_q == SCTP_ON_ORDERED) {
5750 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5751 if (asoc->size_on_all_streams >= control->length) {
5752 asoc->size_on_all_streams -= control->length;
5755 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5757 asoc->size_on_all_streams = 0;
5760 sctp_ucount_decr(asoc->cnt_on_all_streams);
5761 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5762 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5764 } else if (control->on_strm_q) {
5765 panic("strm: %p ctl: %p unknown %d",
5766 strm, control, control->on_strm_q);
5769 control->on_strm_q = 0;
5770 stcb->asoc.control_pdapi = control;
5771 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5773 SCTP_PARTIAL_DELIVERY_ABORTED,
5775 SCTP_SO_NOT_LOCKED);
5776 stcb->asoc.control_pdapi = sv;
5778 } else if ((control->sinfo_stream == sid) &&
5779 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5780 /* We are past our victim SSN */
5784 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5785 /* Update the sequence number */
5786 strm->last_mid_delivered = mid;
5788 /* now kick the stream the new way */
5789 /* sa_ignore NO_NULL_CHK */
5790 sctp_kick_prsctp_reorder_queue(stcb, strm);
5792 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5795 * Now slide thing forward.
5797 sctp_slide_mapping_arrays(stcb);