/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send that is and will be sending it .. for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone has put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
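/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] The window
 * computation above boils down to: advertise what the socket buffer can
 * still absorb, minus everything queued but not yet readable, and never
 * advertise a tiny non-zero sliver (silly window syndrome avoidance). A
 * minimal userland analogue with hypothetical names:
 */
#if 0
#include <stdint.h>

/* Hypothetical stand-in for sctp_sbspace_sub(): saturating subtraction. */
static uint32_t
ex_sub_sat(uint32_t a, uint32_t b)
{
	return ((a > b) ? (a - b) : 0);
}

static uint32_t
ex_calc_rwnd(uint32_t sb_space, uint32_t reasm_bytes,
    uint32_t instrm_bytes, uint32_t ctrl_overhead)
{
	uint32_t calc;

	calc = ex_sub_sat(sb_space, reasm_bytes);	/* awaiting reassembly */
	calc = ex_sub_sat(calc, instrm_bytes);		/* queued per stream */
	if (calc == 0)
		return (0);				/* out of space */
	calc = ex_sub_sat(calc, ctrl_overhead);		/* rwnd control cost */
	if (calc < ctrl_overhead)
		calc = 1;	/* SWS: clamp a too-small window to 1 */
	return (calc);
}
#endif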
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		return (NULL);
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	return (read_queue_e);
}
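/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] Note that
 * sinfo_flags above stores the DATA chunk flags in the upper byte
 * (flags << 8), which is why readers elsewhere in this file unpack them
 * with (sinfo_flags >> 8). For example:
 */
#if 0
#include <stdint.h>

#define EX_SCTP_DATA_UNORDERED 0x04	/* U bit, value from sctp.h */

static int
ex_is_unordered(uint16_t sinfo_flags)
{
	/* chunk flags live in the high byte of sinfo_flags */
	return (((sinfo_flags >> 8) & EX_SCTP_DATA_UNORDERED) != 0);
}
#endif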
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
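/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] The function
 * above sizes the buffer with CMSG_SPACE() (header + data + trailing
 * padding) but sets each cmsg_len with CMSG_LEN() (header + data only).
 * A minimal userland construction of a single SCTP_RCVINFO cmsg follows
 * the same pattern; the struct and constant names are from the SCTP
 * socket API, the flat buffer handling is a simplification:
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Returns bytes consumed (including padding), or 0 if buf is too small. */
static size_t
ex_fill_rcvinfo(char *buf, size_t buflen, const struct sctp_rcvinfo *info)
{
	struct cmsghdr *cmh;

	if (buflen < CMSG_SPACE(sizeof(struct sctp_rcvinfo)))
		return (0);
	/* zero everything so padding bytes are initialized */
	memset(buf, 0, CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
	cmh = (struct cmsghdr *)buf;
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_RCVINFO;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
	memcpy(CMSG_DATA(cmh), info, sizeof(struct sctp_rcvinfo));
	return (CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
}
#endif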
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
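/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] The mapping
 * arrays above are sliding bitmaps indexed by the serial-number distance
 * (the "gap") between a TSN and the base TSN of the array. Simplified
 * versions of the three primitives, with hypothetical names:
 */
#if 0
#include <stdint.h>

static uint32_t
ex_tsn_to_gap(uint32_t tsn, uint32_t base)
{
	/* unsigned wrap-around arithmetic yields the serial distance */
	return (tsn - base);
}

static int
ex_is_present(const uint8_t *map, uint32_t gap)
{
	return ((map[gap >> 3] & (1 << (gap & 0x07))) != 0);
}

static void
ex_set_present(uint8_t *map, uint32_t gap)
{
	map[gap >> 3] |= (1 << (gap & 0x07));
}
#endif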
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one stream can be here in old style
				 * -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the new one,
				 * insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg id
				 * number?? return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
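/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] The
 * SCTP_MID_GT()/SCTP_MID_EQ() comparisons above use serial-number
 * arithmetic whose width depends on the negotiated format: I-DATA carries
 * a 32-bit MID, old DATA a 16-bit SSN. A simplified rendering with a
 * hypothetical name:
 */
#if 0
#include <stdint.h>

static int
ex_mid_gt(int idata_supported, uint32_t a, uint32_t b)
{
	if (idata_supported) {
		/* 32-bit serial comparison (RFC 1982 style) */
		return (((int32_t)(a - b)) > 0);
	} else {
		/* old DATA chunks carry only a 16-bit SSN */
		return (((int16_t)((uint16_t)a - (uint16_t)b)) > 0);
	}
}
#endif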
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}
static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
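/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] The delivery
 * rule above is essentially: hand the socket every message at the head of
 * the stream whose MID equals last-delivered + 1, as long as each entry is
 * a complete (non-fragmented) message. Condensed into a self-contained
 * function with hypothetical names:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct ex_msg {
	struct ex_msg *next;
	uint32_t mid;
	int complete;	/* all fragments present */
};

/* Returns the updated last_mid_delivered after draining in-order messages. */
static uint32_t
ex_drain_in_order(struct ex_msg **headp, uint32_t last_mid_delivered,
    void (*deliver)(struct ex_msg *))
{
	struct ex_msg *m;

	while ((m = *headp) != NULL &&
	    m->mid == last_mid_delivered + 1 && m->complete) {
		*headp = m->next;
		deliver(m);	/* analogue of sctp_add_to_readq() */
		last_mid_delivered++;
	}
	return (last_mid_delivered);
}
#endif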
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
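/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] Both helpers
 * above maintain the invariant that control->tail_mbuf points at the last
 * buffer of the chain, so appends are O(1) instead of a full chain walk. A
 * generic singly-linked analogue with hypothetical names (unlike the kernel
 * code, zero-length buffers are not freed here):
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct ex_buf {
	struct ex_buf *next;
	uint32_t len;
};

struct ex_chain {
	struct ex_buf *head;
	struct ex_buf *tail;	/* last buffer; makes append O(1) */
	uint32_t length;	/* cached total of all len fields */
};

static void
ex_append(struct ex_chain *c, struct ex_buf *b)
{
	if (c->head == NULL) {
		c->head = b;
	} else {
		c->tail->next = b;	/* hook onto the cached tail */
	}
	for (; b != NULL; b = b->next) {
		c->length += b->len;
		c->tail = b;		/* tail tracks the final buffer */
	}
}
#endif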
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one, even though this has a first,
			 * it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control. As long as the next
			 * FIRST is greater than the old first (TSN i.e. FSN
			 * wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way the start of a new guy beyond
				 * the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed.. yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
static uint32_t
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	uint32_t ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered
		 * completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more must have gotten an un-ordered above being
		 * partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until its
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
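/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] The pd_point
 * computed at the top of the function above decides when a partially
 * reassembled message is large enough to start the partial delivery API:
 * it is the smaller of a fraction of the receive buffer limit and the
 * endpoint's configured partial_delivery_point. With hypothetical names:
 */
#if 0
#include <stdint.h>

static uint32_t
ex_pd_point(uint32_t sb_limit, unsigned int shift, uint32_t ep_pd_point)
{
	/* e.g. sb_limit >> shift is some fraction of the socket buffer */
	uint32_t frac = sb_limit >> shift;

	return ((frac < ep_pd_point) ? frac : ep_pd_point);
}
#endif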
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * Its being pd-api'd so we must do some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (unordered == 0) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/*
			 * Ok we created this control and now lets validate
			 * that its legal i.e. there is a B bit set, if not
			 * and we have up to the cum-ack then its invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reassembly portion: o if its
	 * the first it goes to the control mbuf. o if its not first but the
	 * next in sequence it goes to the control, and each succeeding one
	 * in order also goes. o if its not in order we place it on the list
	 * in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so its a dup
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so its a dup
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * validate not beyond top FSN if we have seen last
			 * one
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * Gak, he sent me a duplicate str seq
				 * number
				 */
				/*
				 * foo bar, I guess I will just free this
				 * new guy, should we abort too? FIX ME
				 * MAYBE? Or it COULD be that the SSN's have
				 * wrapped. Maybe I should compare to TSN
				 * somehow... sigh for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			control->top_fsn = chk->rec.data.fsn;
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control structure that
	 * are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the
					 * size-on-all-streams if its not on
					 * the read q. The read q flag will
					 * cause a sballoc so its accounted
					 * for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
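/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] The reassembly
 * queue above is kept sorted by FSN with a single forward scan, and
 * in-sequence fragments are then merged into the control until the next
 * gap. The insertion step in isolation, as a self-contained function with
 * hypothetical names:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct ex_frag {
	struct ex_frag *next;
	uint32_t fsn;
};

/* Returns 0 on success, -1 on a duplicate FSN (protocol violation). */
static int
ex_insert_sorted(struct ex_frag **headp, struct ex_frag *nf)
{
	struct ex_frag **pp;

	for (pp = headp; *pp != NULL; pp = &(*pp)->next) {
		if (((int32_t)((*pp)->fsn - nf->fsn)) > 0) {
			break;			/* first serially-bigger entry */
		}
		if ((*pp)->fsn == nf->fsn) {
			return (-1);		/* duplicate fragment */
		}
	}
	nf->next = *pp;
	*pp = nf;				/* insert before *pp */
	return (0);
}
#endif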
static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
				break;
			}
		}
	} else {
		if (idata_supported) {
			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
					break;
				}
			}
		} else {
			control = TAILQ_FIRST(&strm->uno_inqueue);
		}
	}
	return (control);
}
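/*
 * [Added explanatory sketch, not part of the FreeBSD sources.] The function
 * below parses two wire formats: old DATA (RFC 4960) with a 16-bit SSN and
 * no fragment sequence number, and I-DATA (RFC 8260) with a 32-bit MID and
 * a PPID/FSN union whose meaning depends on the B (first fragment) bit.
 * Simplified views for reference; the real definitions live in
 * sctp_header.h and include the common chunk header:
 */
#if 0
#include <stdint.h>

struct ex_data_chunk {		/* DATA: 16-bit SSN, FSN is implicitly the TSN */
	uint32_t tsn;
	uint16_t sid;
	uint16_t ssn;
	uint32_t ppid;
	/* user data follows */
};

struct ex_idata_chunk {		/* I-DATA: 32-bit MID, PPID/FSN union */
	uint32_t tsn;
	uint16_t sid;
	uint16_t reserved;
	uint32_t mid;
	union {
		uint32_t ppid;	/* first fragment carries the PPID */
		uint32_t fsn;	/* later fragments carry the FSN */
	} ppid_fsn;
	/* user data follows */
};
#endif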
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chk_type)
{
	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
	uint32_t tsn, fsn, gap, mid;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t sid;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control, *ncontrol;
	uint32_t ppid;
	uint8_t chk_flags;
	struct sctp_stream_reset_list *liste;
	int ordered;
	size_t clen;
	int created_control = 0;

	if (chk_type == SCTP_IDATA) {
		struct sctp_idata_chunk *chunk, chunk_buf;

		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
		chk_flags = chunk->ch.chunk_flags;
		clen = sizeof(struct sctp_idata_chunk);
		tsn = ntohl(chunk->dp.tsn);
		sid = ntohs(chunk->dp.sid);
		mid = ntohl(chunk->dp.mid);
		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
			fsn = 0;
			ppid = chunk->dp.ppid_fsn.ppid;
		} else {
			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
			ppid = 0xffffffff;	/* Use as an invalid value. */
		}
	} else {
		struct sctp_data_chunk *chunk, chunk_buf;

		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
		chk_flags = chunk->ch.chunk_flags;
		clen = sizeof(struct sctp_data_chunk);
		tsn = ntohl(chunk->dp.tsn);
		sid = ntohs(chunk->dp.sid);
		mid = (uint32_t)(ntohs(chunk->dp.ssn));
		fsn = tsn;
		ppid = chunk->dp.ppid;
	}
	if ((size_t)chk_length == clen) {
		/*
		 * Need to send an abort since we had an empty data chunk.
		 */
		op_err = sctp_generate_no_user_data_cause(tsn);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* Is the stream valid? */
	if (sid >= asoc->streamincnt) {
		struct sctp_error_invalid_stream *cause;

		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
		    0, M_NOWAIT, 1, MT_DATA);
		if (op_err != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
			cause = mtod(op_err, struct sctp_error_invalid_stream *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
			cause->stream_id = htons(sid);
			cause->reserved = htons(0);
			sctp_queue_op_err(stcb, op_err);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
	/*
	 * If its a fragmented message, lets see if we can find the control
	 * on the reassembly queues.
	 */
	if ((chk_type == SCTP_IDATA) &&
	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
	    (fsn == 0)) {
		/*
		 * The first *must* be fsn 0, and other (middle/end) pieces
		 * can *not* be fsn 0. XXX: This can happen in case of a
		 * wrap around. Ignored for now.
		 */
		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
		    mid, chk_flags);
		goto err_out;
	}
	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
	    chk_flags, control);
	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		/* See if we can find the re-assembly entity */
		if (control != NULL) {
			/* We found something, does it belong? */
			if (ordered && (mid != control->mid)) {
				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
		err_out:
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
				return (0);
			}
			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
				/*
				 * We can't have a switched order with an
				 * unordered chunk
				 */
				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
				    tsn);
				goto err_out;
			}
			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
				/*
				 * We can't have a switched unordered with an
				 * ordered chunk
				 */
				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
				    tsn);
				goto err_out;
			}
		}
	} else {
		/*
		 * Its a complete segment. Lets validate we don't have a
		 * re-assembly going on with the same Stream/Seq (for
		 * ordered) or in the same Stream for unordered.
		 */
		if (control != NULL) {
			if (ordered || asoc->idata_supported) {
				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
				    chk_flags, mid);
				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
				goto err_out;
			} else {
				if ((tsn == control->fsn_included + 1) &&
				    (control->end_added == 0)) {
					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
					goto err_out;
				} else {
					control = NULL;
				}
			}
		}
	}
	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (chk_type == SCTP_DATA) {
			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
				/* Nope not in the valid range dump it */
		dump_packet:
				sctp_set_rwnd(stcb, asoc);
				if ((asoc->cnt_on_all_streams +
				    asoc->cnt_on_reasm_queue +
				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
					SCTP_STAT_INCR(sctps_datadropchklmt);
				} else {
					SCTP_STAT_INCR(sctps_datadroprwnd);
				}
				*break_flag = 1;
				return (0);
			}
		} else {
			if (control == NULL) {
				goto dump_packet;
			}
			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
				goto dump_packet;
			}
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_in_at = 0;
		asoc->tsn_in_wrapped = 1;
	}
	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
	asoc->tsn_in_at++;
#endif
	/*
	 * Before we continue lets validate that we are not being fooled by
	 * an evil attacker. We can only have Nk chunks based on our TSN
	 * spread allowed by the mapping array N * 8 bits, so there is no
	 * way our stream sequence numbers could have wrapped. We of course
	 * only validate the FIRST fragment so the bit must be set.
	 */
	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (TAILQ_EMPTY(&asoc->resetHead)) &&
	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
		    mid, asoc->strmin[sid].last_mid_delivered);

		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    asoc->strmin[sid].last_mid_delivered,
			    tsn,
			    sid,
			    mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
			    tsn,
			    sid,
			    (uint16_t)mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
2028 if (chk_type == SCTP_IDATA) {
2029 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2031 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2033 if (last_chunk == 0) {
2034 if (chk_type == SCTP_IDATA) {
2035 dmbuf = SCTP_M_COPYM(*m,
2036 (offset + sizeof(struct sctp_idata_chunk)),
2039 dmbuf = SCTP_M_COPYM(*m,
2040 (offset + sizeof(struct sctp_data_chunk)),
2043 #ifdef SCTP_MBUF_LOGGING
2044 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2045 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2049 /* We can steal the last chunk */
2053 /* lop off the top part */
2054 if (chk_type == SCTP_IDATA) {
2055 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2057 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2059 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2060 l_len = SCTP_BUF_LEN(dmbuf);
* need to count up the size; hopefully this does not hit too often
2069 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2070 l_len += SCTP_BUF_LEN(lat);
2073 if (l_len > the_len) {
2074 /* Trim the end round bytes off too */
2075 m_adj(dmbuf, -(l_len - the_len));
2078 if (dmbuf == NULL) {
2079 SCTP_STAT_INCR(sctps_nomem);
2083 * Now no matter what, we need a control, get one if we don't have
2084 * one (we may have gotten it above when we found the message was
2087 if (control == NULL) {
2088 sctp_alloc_a_readq(stcb, control);
2089 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2094 if (control == NULL) {
2095 SCTP_STAT_INCR(sctps_nomem);
2098 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2101 control->data = dmbuf;
2102 for (mm = control->data; mm; mm = mm->m_next) {
2103 control->length += SCTP_BUF_LEN(mm);
2105 control->tail_mbuf = NULL;
2106 control->end_added = 1;
2107 control->last_frag_seen = 1;
2108 control->first_frag_seen = 1;
2109 control->fsn_included = fsn;
2110 control->top_fsn = fsn;
2112 created_control = 1;
2114 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2115 chk_flags, ordered, mid, control);
2116 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2117 TAILQ_EMPTY(&asoc->resetHead) &&
2119 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2120 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2121 /* Candidate for express delivery */
* It's not fragmented, no PD-API is up, nothing in the
* delivery queue, it's un-ordered OR ordered and the next to
* deliver AND nothing else is stuck on the stream queue,
* and there is room for it in the socket buffer. Let's just
* stuff it up the buffer....
2129 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2130 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2131 asoc->highest_tsn_inside_nr_map = tsn;
2133 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2136 sctp_add_to_readq(stcb->sctp_ep, stcb,
2137 control, &stcb->sctp_socket->so_rcv,
2138 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2140 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2141 /* for ordered, bump what we delivered */
2142 asoc->strmin[sid].last_mid_delivered++;
2144 SCTP_STAT_INCR(sctps_recvexpress);
2145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2146 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2147 SCTP_STR_LOG_FROM_EXPRS_DEL);
2150 goto finish_express_del;
2153 /* Now will we need a chunk too? */
2154 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2155 sctp_alloc_a_chunk(stcb, chk);
2157 /* No memory so we drop the chunk */
2158 SCTP_STAT_INCR(sctps_nomem);
2159 if (last_chunk == 0) {
2160 /* we copied it, free the copy */
2161 sctp_m_freem(dmbuf);
2165 chk->rec.data.tsn = tsn;
2166 chk->no_fr_allowed = 0;
2167 chk->rec.data.fsn = fsn;
2168 chk->rec.data.mid = mid;
2169 chk->rec.data.sid = sid;
2170 chk->rec.data.ppid = ppid;
2171 chk->rec.data.context = stcb->asoc.context;
2172 chk->rec.data.doing_fast_retransmit = 0;
2173 chk->rec.data.rcv_flags = chk_flags;
2175 chk->send_size = the_len;
2177 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2180 atomic_add_int(&net->ref_count, 1);
2183 /* Set the appropriate TSN mark */
2184 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2185 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2186 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2187 asoc->highest_tsn_inside_nr_map = tsn;
2190 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2191 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2192 asoc->highest_tsn_inside_map = tsn;
2195 /* Now is it complete (i.e. not fragmented)? */
2196 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2198 * Special check for when streams are resetting. We could be
* smarter about this and check the actual stream to see
* if it is not being reset... that way we would not create a
2201 * HOLB when amongst streams being reset and those not being
2205 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2206 SCTP_TSN_GT(tsn, liste->tsn)) {
* yep, it's past where we need to reset... go ahead
2211 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2213 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2215 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2216 unsigned char inserted = 0;
2218 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2219 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2224 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2229 if (inserted == 0) {
* must be put at the end; use prevP
* (all set up from the loop) to set up
2235 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2238 goto finish_express_del;
2240 if (chk_flags & SCTP_DATA_UNORDERED) {
2241 /* queue directly into socket buffer */
2242 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2244 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2245 sctp_add_to_readq(stcb->sctp_ep, stcb,
2247 &stcb->sctp_socket->so_rcv, 1,
2248 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2251 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2253 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2261 goto finish_express_del;
2263 /* If we reach here its a reassembly */
2264 need_reasm_check = 1;
2265 SCTPDBG(SCTP_DEBUG_XXX,
2266 "Queue data to stream for reasm control: %p MID: %u\n",
2268 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2271 * the assoc is now gone and chk was put onto the reasm
2272 * queue, which has all been freed.
2280 /* Here we tidy up things */
2281 if (tsn == (asoc->cumulative_tsn + 1)) {
2282 /* Update cum-ack */
2283 asoc->cumulative_tsn = tsn;
2289 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2291 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2293 SCTP_STAT_INCR(sctps_recvdata);
2294 /* Set it present please */
2295 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2296 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2298 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2299 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2300 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2302 if (need_reasm_check) {
2303 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2304 need_reasm_check = 0;
2306 /* check the special flag for stream resets */
2307 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2308 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
* we have finished working through the backlogged TSNs; now
* it is time to reset streams. 1: call the reset function. 2: free
* the pending_reply space. 3: distribute any chunks in the
* pending_reply_queue.
2315 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2316 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2317 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2318 SCTP_FREE(liste, SCTP_M_STRESET);
2319 /* sa_ignore FREED_MEMORY */
2320 liste = TAILQ_FIRST(&asoc->resetHead);
2321 if (TAILQ_EMPTY(&asoc->resetHead)) {
2322 /* All can be removed */
2323 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2324 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2325 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2329 if (need_reasm_check) {
2330 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2331 need_reasm_check = 0;
2335 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2336 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
* if control->sinfo_tsn is <= liste->tsn we
* can process it, which is the NOT of
* control->sinfo_tsn > liste->tsn
2344 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2345 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2349 if (need_reasm_check) {
2350 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2351 need_reasm_check = 0;
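/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1 bits in v
 * counting from the least significant bit, i.e. the index of the
 * lowest clear bit: tab[0x03] = 2, tab[0x3f] = 6, tab[0xff] = 8.
 * Applied to a byte of the OR'ed mapping arrays, it gives the
 * number of TSNs present in a row within that byte.
 */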
2359 static const int8_t sctp_map_lookup_tab[256] = {
2360 0, 1, 0, 2, 0, 1, 0, 3,
2361 0, 1, 0, 2, 0, 1, 0, 4,
2362 0, 1, 0, 2, 0, 1, 0, 3,
2363 0, 1, 0, 2, 0, 1, 0, 5,
2364 0, 1, 0, 2, 0, 1, 0, 3,
2365 0, 1, 0, 2, 0, 1, 0, 4,
2366 0, 1, 0, 2, 0, 1, 0, 3,
2367 0, 1, 0, 2, 0, 1, 0, 6,
2368 0, 1, 0, 2, 0, 1, 0, 3,
2369 0, 1, 0, 2, 0, 1, 0, 4,
2370 0, 1, 0, 2, 0, 1, 0, 3,
2371 0, 1, 0, 2, 0, 1, 0, 5,
2372 0, 1, 0, 2, 0, 1, 0, 3,
2373 0, 1, 0, 2, 0, 1, 0, 4,
2374 0, 1, 0, 2, 0, 1, 0, 3,
2375 0, 1, 0, 2, 0, 1, 0, 7,
2376 0, 1, 0, 2, 0, 1, 0, 3,
2377 0, 1, 0, 2, 0, 1, 0, 4,
2378 0, 1, 0, 2, 0, 1, 0, 3,
2379 0, 1, 0, 2, 0, 1, 0, 5,
2380 0, 1, 0, 2, 0, 1, 0, 3,
2381 0, 1, 0, 2, 0, 1, 0, 4,
2382 0, 1, 0, 2, 0, 1, 0, 3,
2383 0, 1, 0, 2, 0, 1, 0, 6,
2384 0, 1, 0, 2, 0, 1, 0, 3,
2385 0, 1, 0, 2, 0, 1, 0, 4,
2386 0, 1, 0, 2, 0, 1, 0, 3,
2387 0, 1, 0, 2, 0, 1, 0, 5,
2388 0, 1, 0, 2, 0, 1, 0, 3,
2389 0, 1, 0, 2, 0, 1, 0, 4,
2390 0, 1, 0, 2, 0, 1, 0, 3,
2391 0, 1, 0, 2, 0, 1, 0, 8
2396 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2399 * Now we also need to check the mapping array in a couple of ways.
2400 * 1) Did we move the cum-ack point?
* When you first glance at this you might think that all entries
* that make up the position of the cum-ack would be in the
* nr-mapping array only... i.e. things up to the cum-ack are always
* deliverable. That's true with one exception: when it's a fragmented
* message we may not deliver the data until some threshold (or all
2407 * of it) is in place. So we must OR the nr_mapping_array and
2408 * mapping_array to get a true picture of the cum-ack.
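*
* A short worked example (one byte per array, LSB = lowest TSN):
*   nr_mapping_array[0] = 0x2f (00101111)
*   mapping_array[0]    = 0x10 (00010000)
*   OR                  = 0x3f (00111111)
* Six TSNs starting at mapping_array_base_tsn are present in a row,
* so the cum-ack can advance by six (sctp_map_lookup_tab[0x3f] == 6).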
2410 struct sctp_association *asoc;
2413 int slide_from, slide_end, lgap, distance;
2414 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2418 old_cumack = asoc->cumulative_tsn;
2419 old_base = asoc->mapping_array_base_tsn;
2420 old_highest = asoc->highest_tsn_inside_map;
2422 * We could probably improve this a small bit by calculating the
2423 * offset of the current cum-ack as the starting point.
2426 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2427 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2431 /* there is a 0 bit */
2432 at += sctp_map_lookup_tab[val];
2436 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
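/*
 * "at" now counts the TSNs present consecutively from the base of
 * the mapping arrays, so base + (at - 1) is the highest
 * consecutively-received TSN, i.e. the new cumulative TSN.
 */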
2438 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2439 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2441 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2442 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2444 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2445 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2446 sctp_print_mapping_array(asoc);
2447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2448 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2450 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2451 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2454 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2455 highest_tsn = asoc->highest_tsn_inside_nr_map;
2457 highest_tsn = asoc->highest_tsn_inside_map;
2459 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2460 /* The complete array was completed by a single FR */
2461 /* highest becomes the cum-ack */
2467 /* clear the array */
2468 clr = ((at + 7) >> 3);
2469 if (clr > asoc->mapping_array_size) {
2470 clr = asoc->mapping_array_size;
2472 memset(asoc->mapping_array, 0, clr);
2473 memset(asoc->nr_mapping_array, 0, clr);
2475 for (i = 0; i < asoc->mapping_array_size; i++) {
2476 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2477 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2478 sctp_print_mapping_array(asoc);
2482 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2483 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2484 } else if (at >= 8) {
2485 /* we can slide the mapping array down */
2486 /* slide_from holds where we hit the first NON 0xff byte */
* now calculate the ceiling of the move using our highest TSN value.
2492 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2493 slide_end = (lgap >> 3);
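/*
 * lgap is the TSN offset of highest_tsn from the array base; the
 * >> 3 converts it to a byte index, since every mapping-array byte
 * covers 8 TSNs.
 */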
2494 if (slide_end < slide_from) {
2495 sctp_print_mapping_array(asoc);
2497 panic("impossible slide");
2499 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2500 lgap, slide_end, slide_from, at);
2504 if (slide_end > asoc->mapping_array_size) {
2506 panic("would overrun buffer");
2508 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2509 asoc->mapping_array_size, slide_end);
2510 slide_end = asoc->mapping_array_size;
2513 distance = (slide_end - slide_from) + 1;
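/*
 * distance is the number of mapping-array bytes that still carry
 * state and must be copied down to the front when we slide.
 */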
2514 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2515 sctp_log_map(old_base, old_cumack, old_highest,
2516 SCTP_MAP_PREPARE_SLIDE);
2517 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2518 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2520 if (distance + slide_from > asoc->mapping_array_size ||
2523 * Here we do NOT slide forward the array so that
2524 * hopefully when more data comes in to fill it up
2525 * we will be able to slide it forward. Really I
2526 * don't think this should happen :-0
2529 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2530 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2531 (uint32_t)asoc->mapping_array_size,
2532 SCTP_MAP_SLIDE_NONE);
2537 for (ii = 0; ii < distance; ii++) {
2538 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2539 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2542 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2543 asoc->mapping_array[ii] = 0;
2544 asoc->nr_mapping_array[ii] = 0;
2546 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2547 asoc->highest_tsn_inside_map += (slide_from << 3);
2549 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2550 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2552 asoc->mapping_array_base_tsn += (slide_from << 3);
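/*
 * Every byte slid off the front represents 8 TSNs, hence the << 3
 * when advancing the base TSN and the highest-TSN trackers above.
 */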
2553 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2554 sctp_log_map(asoc->mapping_array_base_tsn,
2555 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2556 SCTP_MAP_SLIDE_RESULT);
2563 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2565 struct sctp_association *asoc;
2566 uint32_t highest_tsn;
2569 sctp_slide_mapping_arrays(stcb);
2571 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2572 highest_tsn = asoc->highest_tsn_inside_nr_map;
2574 highest_tsn = asoc->highest_tsn_inside_map;
2576 /* Is there a gap now? */
2577 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2580 * Now we need to see if we need to queue a sack or just start the
2581 * timer (if allowed).
2583 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
* Ok, special case: in the SHUTDOWN-SENT state we make
* sure the SACK timer is off and instead send a SHUTDOWN and a
2589 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2590 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2591 stcb->sctp_ep, stcb, NULL,
2592 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2594 sctp_send_shutdown(stcb,
2595 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2597 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2601 * CMT DAC algorithm: increase number of packets received
2604 stcb->asoc.cmt_dac_pkts_rcvd++;
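/*
 * A summary of the tests below: send a SACK right away if one is
 * already owed (send_sack), if a gap just closed or one still
 * exists, if duplicate TSNs were reported, if delayed SACKs are
 * disabled, or if the data_pkts_seen limit (sack_freq) was hit;
 * otherwise just make sure the delayed-SACK timer is running.
 */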
2606 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2608 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2610 (stcb->asoc.numduptsns) || /* we have dup's */
2611 (is_a_gap) || /* is still a gap */
2612 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2613 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2616 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2617 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2618 (stcb->asoc.send_sack == 0) &&
2619 (stcb->asoc.numduptsns == 0) &&
2620 (stcb->asoc.delayed_ack) &&
2621 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2624 * CMT DAC algorithm: With CMT, delay acks
2625 * even in the face of
* reordering. Therefore, acks that do
* not have to be sent because of the above
* reasons will be delayed. That is, acks
2630 * that would have been sent due to gap
2631 * reports will be delayed with DAC. Start
2632 * the delayed ack timer.
2634 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2635 stcb->sctp_ep, stcb, NULL);
2638 * Ok we must build a SACK since the timer
2639 * is pending, we got our first packet OR
2640 * there are gaps or duplicates.
2642 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2643 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2646 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2647 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2648 stcb->sctp_ep, stcb, NULL);
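/*
 * In outline: walk every chunk in the DATA region of a received
 * packet. DATA/I-DATA chunks are validated and handed to
 * sctp_process_a_data_chunk(); known control-chunk types that show
 * up after DATA draw a protocol-violation abort; unknown types
 * follow the RFC bit rules. Afterwards sctp_sack_check() decides
 * how to acknowledge.
 */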
2655 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2656 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2657 struct sctp_nets *net, uint32_t *high_tsn)
2659 struct sctp_chunkhdr *ch, chunk_buf;
2660 struct sctp_association *asoc;
2661 int num_chunks = 0; /* number of control chunks processed */
2663 int break_flag, last_chunk;
2664 int abort_flag = 0, was_a_gap;
2666 uint32_t highest_tsn;
2667 uint16_t chk_length;
2670 sctp_set_rwnd(stcb, &stcb->asoc);
2673 SCTP_TCB_LOCK_ASSERT(stcb);
2675 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2676 highest_tsn = asoc->highest_tsn_inside_nr_map;
2678 highest_tsn = asoc->highest_tsn_inside_map;
2680 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2682 * setup where we got the last DATA packet from for any SACK that
2683 * may need to go out. Don't bump the net. This is done ONLY when a
2684 * chunk is assigned.
2686 asoc->last_data_chunk_from = net;
2689 * Now before we proceed we must figure out if this is a wasted
2690 * cluster... i.e. it is a small packet sent in and yet the driver
2691 * underneath allocated a full cluster for it. If so we must copy it
2692 * to a smaller mbuf and free up the cluster mbuf. This will help
2693 * with cluster starvation. Note for __Panda__ we don't do this
2694 * since it has clusters all the way down to 64 bytes.
2696 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2697 /* we only handle mbufs that are singletons.. not chains */
2698 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2700 /* ok lets see if we can copy the data up */
2703 /* get the pointers and copy */
2704 to = mtod(m, caddr_t *);
2705 from = mtod((*mm), caddr_t *);
2706 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2707 /* copy the length and free up the old */
2708 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2710 /* success, back copy */
2713 /* We are in trouble in the mbuf world .. yikes */
2717 /* get pointer to the first chunk header */
2718 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2719 sizeof(struct sctp_chunkhdr),
2720 (uint8_t *)&chunk_buf);
2725 * process all DATA chunks...
2727 *high_tsn = asoc->cumulative_tsn;
2729 asoc->data_pkts_seen++;
2730 while (stop_proc == 0) {
2731 /* validate chunk length */
2732 chk_length = ntohs(ch->chunk_length);
2733 if (length - *offset < chk_length) {
/* all done, mutilated chunk */
2738 if ((asoc->idata_supported == 1) &&
2739 (ch->chunk_type == SCTP_DATA)) {
2740 struct mbuf *op_err;
2741 char msg[SCTP_DIAG_INFO_LEN];
2743 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2744 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2745 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2746 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2749 if ((asoc->idata_supported == 0) &&
2750 (ch->chunk_type == SCTP_IDATA)) {
2751 struct mbuf *op_err;
2752 char msg[SCTP_DIAG_INFO_LEN];
2754 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2755 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2756 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2757 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2760 if ((ch->chunk_type == SCTP_DATA) ||
2761 (ch->chunk_type == SCTP_IDATA)) {
2764 if (ch->chunk_type == SCTP_DATA) {
2765 clen = sizeof(struct sctp_data_chunk);
2767 clen = sizeof(struct sctp_idata_chunk);
2769 if (chk_length < clen) {
* Need to send an abort since we had an
* invalid data chunk.
2774 struct mbuf *op_err;
2775 char msg[SCTP_DIAG_INFO_LEN];
2777 snprintf(msg, sizeof(msg), "%s chunk of length %u",
2778 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2780 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2781 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2782 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2785 #ifdef SCTP_AUDITING_ENABLED
2786 sctp_audit_log(0xB1, 0);
2788 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2793 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2794 chk_length, net, high_tsn, &abort_flag, &break_flag,
2795 last_chunk, ch->chunk_type)) {
2803 * Set because of out of rwnd space and no
2804 * drop rep space left.
2810 /* not a data chunk in the data region */
2811 switch (ch->chunk_type) {
2812 case SCTP_INITIATION:
2813 case SCTP_INITIATION_ACK:
2814 case SCTP_SELECTIVE_ACK:
2815 case SCTP_NR_SELECTIVE_ACK:
2816 case SCTP_HEARTBEAT_REQUEST:
2817 case SCTP_HEARTBEAT_ACK:
2818 case SCTP_ABORT_ASSOCIATION:
2820 case SCTP_SHUTDOWN_ACK:
2821 case SCTP_OPERATION_ERROR:
2822 case SCTP_COOKIE_ECHO:
2823 case SCTP_COOKIE_ACK:
2826 case SCTP_SHUTDOWN_COMPLETE:
2827 case SCTP_AUTHENTICATION:
2828 case SCTP_ASCONF_ACK:
2829 case SCTP_PACKET_DROPPED:
2830 case SCTP_STREAM_RESET:
2831 case SCTP_FORWARD_CUM_TSN:
2835 * Now, what do we do with KNOWN
2836 * chunks that are NOT in the right
2839 * For now, I do nothing but ignore
2840 * them. We may later want to add
2841 * sysctl stuff to switch out and do
2842 * either an ABORT() or possibly
2845 struct mbuf *op_err;
2846 char msg[SCTP_DIAG_INFO_LEN];
2848 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2850 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2851 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
* Unknown chunk type: use bit rules after checking the length.
2859 if (chk_length < sizeof(struct sctp_chunkhdr)) {
* Need to send an abort since we
* had an invalid chunk.
2864 struct mbuf *op_err;
2865 char msg[SCTP_DIAG_INFO_LEN];
2867 snprintf(msg, sizeof(msg), "Chunk of length %u",
2869 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2870 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2871 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
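/*
 * Standard rules for unknown chunk types (RFC 4960, Section 3.2):
 * bit 0x40 of the type asks us to report the unrecognized chunk in
 * an ERROR, and bit 0x80 asks us to skip it and keep processing;
 * with 0x80 clear we discard the rest of the packet.
 */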
2874 if (ch->chunk_type & 0x40) {
2875 /* Add a error report to the queue */
2876 struct mbuf *op_err;
2877 struct sctp_gen_error_cause *cause;
2879 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2880 0, M_NOWAIT, 1, MT_DATA);
2881 if (op_err != NULL) {
2882 cause = mtod(op_err, struct sctp_gen_error_cause *);
2883 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2884 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2885 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2886 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2887 if (SCTP_BUF_NEXT(op_err) != NULL) {
2888 sctp_queue_op_err(stcb, op_err);
2890 sctp_m_freem(op_err);
2894 if ((ch->chunk_type & 0x80) == 0) {
2895 /* discard the rest of this packet */
2897 } /* else skip this bad chunk and
2900 } /* switch of chunk type */
2902 *offset += SCTP_SIZE32(chk_length);
2903 if ((*offset >= length) || stop_proc) {
2904 /* no more data left in the mbuf chain */
2908 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2909 sizeof(struct sctp_chunkhdr),
2910 (uint8_t *)&chunk_buf);
2919 * we need to report rwnd overrun drops.
2921 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
* Did we get data? If so, update the time for auto-close and
2926 * give peer credit for being alive.
2928 SCTP_STAT_INCR(sctps_recvpktwithdata);
2929 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2930 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2931 stcb->asoc.overall_error_count,
2933 SCTP_FROM_SCTP_INDATA,
2936 stcb->asoc.overall_error_count = 0;
2937 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2939 /* now service all of the reassm queue if needed */
2940 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2941 /* Assure that we ack right away */
2942 stcb->asoc.send_sack = 1;
2944 /* Start a sack timer or QUEUE a SACK for sending */
2945 sctp_sack_check(stcb, was_a_gap);
2950 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2951 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2953 uint32_t *biggest_newly_acked_tsn,
2954 uint32_t *this_sack_lowest_newack,
2957 struct sctp_tmit_chunk *tp1;
2958 unsigned int theTSN;
2959 int j, wake_him = 0, circled = 0;
2961 /* Recover the tp1 we last saw */
2964 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
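/*
 * Gap Ack Block boundaries are offsets relative to the Cumulative
 * TSN Ack (last_tsn), so the block covers the TSNs
 * last_tsn + frag_strt .. last_tsn + frag_end, which is exactly
 * what the loop below walks.
 */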
2966 for (j = frag_strt; j <= frag_end; j++) {
2967 theTSN = j + last_tsn;
2969 if (tp1->rec.data.doing_fast_retransmit)
2973 * CMT: CUCv2 algorithm. For each TSN being
2974 * processed from the sent queue, track the
2975 * next expected pseudo-cumack, or
2976 * rtx_pseudo_cumack, if required. Separate
2977 * cumack trackers for first transmissions,
2978 * and retransmissions.
2980 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2981 (tp1->whoTo->find_pseudo_cumack == 1) &&
2982 (tp1->snd_count == 1)) {
2983 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2984 tp1->whoTo->find_pseudo_cumack = 0;
2986 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2987 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2988 (tp1->snd_count > 1)) {
2989 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2990 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2992 if (tp1->rec.data.tsn == theTSN) {
2993 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
* must be held until cum-ack passes
2998 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3000 * If it is less than RESEND, it is
3001 * now no-longer in flight.
3002 * Higher values may already be set
3003 * via previous Gap Ack Blocks...
3004 * i.e. ACKED or RESEND.
3006 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3007 *biggest_newly_acked_tsn)) {
3008 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3011 * CMT: SFR algo (and HTNA) - set
3012 * saw_newack to 1 for dest being
3013 * newly acked. update
3014 * this_sack_highest_newack if
3017 if (tp1->rec.data.chunk_was_revoked == 0)
3018 tp1->whoTo->saw_newack = 1;
3020 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3021 tp1->whoTo->this_sack_highest_newack)) {
3022 tp1->whoTo->this_sack_highest_newack =
3026 * CMT DAC algo: also update
3027 * this_sack_lowest_newack
3029 if (*this_sack_lowest_newack == 0) {
3030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3031 sctp_log_sack(*this_sack_lowest_newack,
3036 SCTP_LOG_TSN_ACKED);
3038 *this_sack_lowest_newack = tp1->rec.data.tsn;
3041 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3042 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3043 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3044 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3045 * Separate pseudo_cumack trackers for first transmissions and
3048 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3049 if (tp1->rec.data.chunk_was_revoked == 0) {
3050 tp1->whoTo->new_pseudo_cumack = 1;
3052 tp1->whoTo->find_pseudo_cumack = 1;
3054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3055 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3057 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3058 if (tp1->rec.data.chunk_was_revoked == 0) {
3059 tp1->whoTo->new_pseudo_cumack = 1;
3061 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3063 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3064 sctp_log_sack(*biggest_newly_acked_tsn,
3069 SCTP_LOG_TSN_ACKED);
3071 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3072 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3073 tp1->whoTo->flight_size,
3075 (uint32_t)(uintptr_t)tp1->whoTo,
3078 sctp_flight_size_decrease(tp1);
3079 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3080 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3083 sctp_total_flight_decrease(stcb, tp1);
3085 tp1->whoTo->net_ack += tp1->send_size;
3086 if (tp1->snd_count < 2) {
* True non-retransmitted chunk
3090 tp1->whoTo->net_ack2 += tp1->send_size;
3098 sctp_calculate_rto(stcb,
3101 &tp1->sent_rcv_time,
3102 SCTP_RTT_FROM_DATA);
3105 if (tp1->whoTo->rto_needed == 0) {
3106 tp1->whoTo->rto_needed = 1;
3113 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3114 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3115 stcb->asoc.this_sack_highest_gap)) {
3116 stcb->asoc.this_sack_highest_gap =
3119 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3120 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3121 #ifdef SCTP_AUDITING_ENABLED
3122 sctp_audit_log(0xB2,
3123 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3128 * All chunks NOT UNSENT fall through here and are marked
3129 * (leave PR-SCTP ones that are to skip alone though)
3131 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3132 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3133 tp1->sent = SCTP_DATAGRAM_MARKED;
3135 if (tp1->rec.data.chunk_was_revoked) {
3136 /* deflate the cwnd */
3137 tp1->whoTo->cwnd -= tp1->book_size;
3138 tp1->rec.data.chunk_was_revoked = 0;
3140 /* NR Sack code here */
3142 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3143 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3144 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3147 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3150 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3151 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3152 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3153 stcb->asoc.trigger_reset = 1;
3155 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3161 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3162 sctp_m_freem(tp1->data);
3169 } /* if (tp1->tsn == theTSN) */
3170 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3173 tp1 = TAILQ_NEXT(tp1, sctp_next);
3174 if ((tp1 == NULL) && (circled == 0)) {
3176 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3178 } /* end while (tp1) */
3181 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3183 /* In case the fragments were not in order we must reset */
3184 } /* end for (j = fragStart */
3186 return (wake_him); /* Return value only used for nr-sack */
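/*
 * Walk all Gap Ack Blocks (and NR Gap Ack Blocks) of a SACK:
 * malformed blocks are skipped, an out-of-order block restarts the
 * scan from the head of the sent queue, and each usable block is
 * handed to sctp_process_segment_range() above.
 */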
3191 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3192 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3193 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3194 int num_seg, int num_nr_seg, int *rto_ok)
3196 struct sctp_gap_ack_block *frag, block;
3197 struct sctp_tmit_chunk *tp1;
3202 uint16_t frag_strt, frag_end, prev_frag_end;
3204 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3208 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3211 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3213 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3214 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3215 *offset += sizeof(block);
3217 return (chunk_freed);
3219 frag_strt = ntohs(frag->start);
3220 frag_end = ntohs(frag->end);
3222 if (frag_strt > frag_end) {
3223 /* This gap report is malformed, skip it. */
3226 if (frag_strt <= prev_frag_end) {
3227 /* This gap report is not in order, so restart. */
3228 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3230 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3231 *biggest_tsn_acked = last_tsn + frag_end;
3238 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3239 non_revocable, &num_frs, biggest_newly_acked_tsn,
3240 this_sack_lowest_newack, rto_ok)) {
3243 prev_frag_end = frag_end;
3245 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3247 sctp_log_fr(*biggest_tsn_acked,
3248 *biggest_newly_acked_tsn,
3249 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3251 return (chunk_freed);
3255 sctp_check_for_revoked(struct sctp_tcb *stcb,
3256 struct sctp_association *asoc, uint32_t cumack,
3257 uint32_t biggest_tsn_acked)
3259 struct sctp_tmit_chunk *tp1;
3261 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3262 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3264 * ok this guy is either ACK or MARKED. If it is
3265 * ACKED it has been previously acked but not this
* time, i.e. revoked. If it is MARKED it was ACK'ed
3269 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3272 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3273 /* it has been revoked */
3274 tp1->sent = SCTP_DATAGRAM_SENT;
3275 tp1->rec.data.chunk_was_revoked = 1;
3277 * We must add this stuff back in to assure
3278 * timers and such get started.
3280 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3281 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3282 tp1->whoTo->flight_size,
3284 (uint32_t)(uintptr_t)tp1->whoTo,
3287 sctp_flight_size_increase(tp1);
3288 sctp_total_flight_increase(stcb, tp1);
3290 * We inflate the cwnd to compensate for our
3291 * artificial inflation of the flight_size.
3293 tp1->whoTo->cwnd += tp1->book_size;
3294 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3295 sctp_log_sack(asoc->last_acked_seq,
3300 SCTP_LOG_TSN_REVOKED);
3302 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3303 /* it has been re-acked in this SACK */
3304 tp1->sent = SCTP_DATAGRAM_ACKED;
3307 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3314 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3315 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3317 struct sctp_tmit_chunk *tp1;
3318 int strike_flag = 0;
3320 int tot_retrans = 0;
3321 uint32_t sending_seq;
3322 struct sctp_nets *net;
3323 int num_dests_sacked = 0;
3326 * select the sending_seq, this is either the next thing ready to be
3327 * sent but not transmitted, OR, the next seq we assign.
3329 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3331 sending_seq = asoc->sending_seq;
3333 sending_seq = tp1->rec.data.tsn;
3336 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3337 if ((asoc->sctp_cmt_on_off > 0) &&
3338 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3339 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3340 if (net->saw_newack)
3344 if (stcb->asoc.prsctp_supported) {
3345 (void)SCTP_GETTIME_TIMEVAL(&now);
3347 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3349 if (tp1->no_fr_allowed) {
3350 /* this one had a timeout or something */
3353 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3354 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3355 sctp_log_fr(biggest_tsn_newly_acked,
3358 SCTP_FR_LOG_CHECK_STRIKE);
3360 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3361 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3365 if (stcb->asoc.prsctp_supported) {
3366 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3367 /* Is it expired? */
3368 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3369 /* Yes so drop it */
3370 if (tp1->data != NULL) {
3371 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3372 SCTP_SO_NOT_LOCKED);
3379 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3380 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3381 /* we are beyond the tsn in the sack */
3384 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3385 /* either a RESEND, ACKED, or MARKED */
3387 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
/* Continue striking FWD-TSN chunks */
3389 tp1->rec.data.fwd_tsn_cnt++;
3394 * CMT : SFR algo (covers part of DAC and HTNA as well)
3396 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
* No new acks were received for data sent to this
3399 * dest. Therefore, according to the SFR algo for
3400 * CMT, no data sent to this dest can be marked for
3401 * FR using this SACK.
3404 } else if (tp1->whoTo &&
3405 SCTP_TSN_GT(tp1->rec.data.tsn,
3406 tp1->whoTo->this_sack_highest_newack) &&
3407 !(accum_moved && asoc->fast_retran_loss_recovery)) {
* CMT: New acks were received for data sent to
3410 * this dest. But no new acks were seen for data
3411 * sent after tp1. Therefore, according to the SFR
3412 * algo for CMT, tp1 cannot be marked for FR using
3413 * this SACK. This step covers part of the DAC algo
3414 * and the HTNA algo as well.
* Here we check to see if we have already done a FR
3420 * and if so we see if the biggest TSN we saw in the sack is
3421 * smaller than the recovery point. If so we don't strike
3422 * the tsn... otherwise we CAN strike the TSN.
3425 * @@@ JRI: Check for CMT if (accum_moved &&
3426 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3429 if (accum_moved && asoc->fast_retran_loss_recovery) {
3431 * Strike the TSN if in fast-recovery and cum-ack
3434 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3435 sctp_log_fr(biggest_tsn_newly_acked,
3438 SCTP_FR_LOG_STRIKE_CHUNK);
3440 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3443 if ((asoc->sctp_cmt_on_off > 0) &&
3444 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3446 * CMT DAC algorithm: If SACK flag is set to
3447 * 0, then lowest_newack test will not pass
3448 * because it would have been set to the
3449 * cumack earlier. If not already to be
3450 * rtx'd, If not a mixed sack and if tp1 is
3451 * not between two sacked TSNs, then mark by
3452 * one more. NOTE that we are marking by one
3453 * additional time since the SACK DAC flag
3454 * indicates that two packets have been
3455 * received after this missing TSN.
3457 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3458 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3459 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3460 sctp_log_fr(16 + num_dests_sacked,
3463 SCTP_FR_LOG_STRIKE_CHUNK);
3468 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3469 (asoc->sctp_cmt_on_off == 0)) {
3471 * For those that have done a FR we must take
3472 * special consideration if we strike. I.e the
3473 * biggest_newly_acked must be higher than the
3474 * sending_seq at the time we did the FR.
3477 #ifdef SCTP_FR_TO_ALTERNATE
3479 * If FR's go to new networks, then we must only do
3480 * this for singly homed asoc's. However if the FR's
* go to the same network (Armando's work) then it's
3482 * ok to FR multiple times.
3490 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3491 tp1->rec.data.fast_retran_tsn)) {
3493 * Strike the TSN, since this ack is
3494 * beyond where things were when we
3497 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3498 sctp_log_fr(biggest_tsn_newly_acked,
3501 SCTP_FR_LOG_STRIKE_CHUNK);
3503 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3507 if ((asoc->sctp_cmt_on_off > 0) &&
3508 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3510 * CMT DAC algorithm: If
3511 * SACK flag is set to 0,
3512 * then lowest_newack test
3513 * will not pass because it
3514 * would have been set to
3515 * the cumack earlier. If
3516 * not already to be rtx'd,
3517 * If not a mixed sack and
3518 * if tp1 is not between two
3519 * sacked TSNs, then mark by
3520 * one more. NOTE that we
3521 * are marking by one
3522 * additional time since the
3523 * SACK DAC flag indicates
3524 * that two packets have
3525 * been received after this
3528 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3529 (num_dests_sacked == 1) &&
3530 SCTP_TSN_GT(this_sack_lowest_newack,
3531 tp1->rec.data.tsn)) {
3532 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3533 sctp_log_fr(32 + num_dests_sacked,
3536 SCTP_FR_LOG_STRIKE_CHUNK);
3538 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3546 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3549 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3550 biggest_tsn_newly_acked)) {
3552 * We don't strike these: This is the HTNA
* algorithm, i.e. we don't strike if our TSN is
3554 * larger than the Highest TSN Newly Acked.
3558 /* Strike the TSN */
3559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3560 sctp_log_fr(biggest_tsn_newly_acked,
3563 SCTP_FR_LOG_STRIKE_CHUNK);
3565 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3568 if ((asoc->sctp_cmt_on_off > 0) &&
3569 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3571 * CMT DAC algorithm: If SACK flag is set to
3572 * 0, then lowest_newack test will not pass
3573 * because it would have been set to the
3574 * cumack earlier. If not already to be
3575 * rtx'd, If not a mixed sack and if tp1 is
3576 * not between two sacked TSNs, then mark by
3577 * one more. NOTE that we are marking by one
3578 * additional time since the SACK DAC flag
3579 * indicates that two packets have been
3580 * received after this missing TSN.
3582 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3583 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3585 sctp_log_fr(48 + num_dests_sacked,
3588 SCTP_FR_LOG_STRIKE_CHUNK);
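/*
 * Strike mechanics, as the guards above suggest: each miss report
 * bumps tp1->sent one step toward SCTP_DATAGRAM_RESEND, so the
 * third strike normally reaches it. The block below then prepares
 * the chunk for fast retransmission.
 */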
3594 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3595 struct sctp_nets *alt;
3597 /* fix counts and things */
3598 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3599 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3600 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3602 (uint32_t)(uintptr_t)tp1->whoTo,
3606 tp1->whoTo->net_ack++;
3607 sctp_flight_size_decrease(tp1);
3608 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3609 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3614 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3615 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3616 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3618 /* add back to the rwnd */
3619 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3621 /* remove from the total flight */
3622 sctp_total_flight_decrease(stcb, tp1);
3624 if ((stcb->asoc.prsctp_supported) &&
3625 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3627 * Has it been retransmitted tv_sec times? -
3628 * we store the retran count there.
3630 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3631 /* Yes, so drop it */
3632 if (tp1->data != NULL) {
3633 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3634 SCTP_SO_NOT_LOCKED);
3636 /* Make sure to flag we had a FR */
3637 if (tp1->whoTo != NULL) {
3638 tp1->whoTo->net_ack++;
3644 * SCTP_PRINTF("OK, we are now ready to FR this
3647 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3648 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3652 /* This is a subsequent FR */
3653 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3655 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3656 if (asoc->sctp_cmt_on_off > 0) {
3658 * CMT: Using RTX_SSTHRESH policy for CMT.
3659 * If CMT is being used, then pick dest with
3660 * largest ssthresh for any retransmission.
3662 tp1->no_fr_allowed = 1;
3664 /* sa_ignore NO_NULL_CHK */
3665 if (asoc->sctp_cmt_pf > 0) {
3667 * JRS 5/18/07 - If CMT PF is on,
3668 * use the PF version of
3671 alt = sctp_find_alternate_net(stcb, alt, 2);
3674 * JRS 5/18/07 - If only CMT is on,
3675 * use the CMT version of
3678 /* sa_ignore NO_NULL_CHK */
3679 alt = sctp_find_alternate_net(stcb, alt, 1);
3685 * CUCv2: If a different dest is picked for
3686 * the retransmission, then new
3687 * (rtx-)pseudo_cumack needs to be tracked
3688 * for orig dest. Let CUCv2 track new (rtx-)
3689 * pseudo-cumack always.
3692 tp1->whoTo->find_pseudo_cumack = 1;
3693 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3696 } else { /* CMT is OFF */
3698 #ifdef SCTP_FR_TO_ALTERNATE
3699 /* Can we find an alternate? */
3700 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3703 * default behavior is to NOT retransmit
3704 * FR's to an alternate. Armando Caro's
3705 * paper details why.
3711 tp1->rec.data.doing_fast_retransmit = 1;
3713 /* mark the sending seq for possible subsequent FR's */
3715 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3716 * (uint32_t)tpi->rec.data.tsn);
3718 if (TAILQ_EMPTY(&asoc->send_queue)) {
* If the send queue is empty then it's
3721 * the next sequence number that will be
3722 * assigned so we subtract one from this to
3723 * get the one we last sent.
3725 tp1->rec.data.fast_retran_tsn = sending_seq;
3728 * If there are chunks on the send queue
3729 * (unsent data that has made it from the
* stream queues but not out the door), we
3731 * take the first one (which will have the
3732 * lowest TSN) and subtract one to get the
3735 struct sctp_tmit_chunk *ttt;
3737 ttt = TAILQ_FIRST(&asoc->send_queue);
3738 tp1->rec.data.fast_retran_tsn =
* this guy had an RTO calculation pending on
3747 if ((tp1->whoTo != NULL) &&
3748 (tp1->whoTo->rto_needed == 0)) {
3749 tp1->whoTo->rto_needed = 1;
3753 if (alt != tp1->whoTo) {
3754 /* yes, there is an alternate. */
3755 sctp_free_remote_addr(tp1->whoTo);
3756 /* sa_ignore FREED_MEMORY */
3758 atomic_add_int(&alt->ref_count, 1);
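/*
 * PR-SCTP (RFC 3758): the advanced peer ack point is the highest
 * TSN the peer may be told to skip via a FORWARD-TSN chunk. It may
 * move across chunks that are abandoned (FORWARD_TSN_SKIP) or
 * NR-acked, but never across a reliable chunk still awaiting its
 * ack.
 */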
3764 struct sctp_tmit_chunk *
3765 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3766 struct sctp_association *asoc)
3768 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3772 if (asoc->prsctp_supported == 0) {
3775 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3776 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3777 tp1->sent != SCTP_DATAGRAM_RESEND &&
3778 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3779 /* no chance to advance, out of here */
3782 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3783 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3784 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3785 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3786 asoc->advanced_peer_ack_point,
3787 tp1->rec.data.tsn, 0, 0);
3790 if (!PR_SCTP_ENABLED(tp1->flags)) {
3792 * We can't fwd-tsn past any that are reliable aka
3793 * retransmitted until the asoc fails.
3798 (void)SCTP_GETTIME_TIMEVAL(&now);
3802 * now we got a chunk which is marked for another
3803 * retransmission to a PR-stream but has run out its chances
3804 * already maybe OR has been marked to skip now. Can we skip
* it if it's a resend?
3807 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3808 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3810 * Now is this one marked for resend and its time is
3813 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3814 /* Yes so drop it */
3816 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3817 1, SCTP_SO_NOT_LOCKED);
* No, we are done when we hit one marked for resend
* whose time has not expired.
3828 * Ok now if this chunk is marked to drop it we can clean up
3829 * the chunk, advance our peer ack point and we can check
3832 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3833 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3834 /* advance PeerAckPoint goes forward */
3835 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3836 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3838 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3839 /* No update but we do save the chk */
* If it is still in RESEND we can advance no further.
3854 sctp_fs_audit(struct sctp_association *asoc)
3856 struct sctp_tmit_chunk *chk;
3857 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3860 int entry_flight, entry_cnt;
3865 entry_flight = asoc->total_flight;
3866 entry_cnt = asoc->total_flight_count;
3868 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3871 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3872 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3873 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3878 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3880 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3882 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3889 if ((inflight > 0) || (inbetween > 0)) {
3891 panic("Flight size-express incorrect? \n");
3893 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3894 entry_flight, entry_cnt);
3896 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3897 inflight, inbetween, resend, above, acked);
3906 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3907 struct sctp_association *asoc,
3908 struct sctp_tmit_chunk *tp1)
3910 tp1->window_probe = 0;
3911 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
/* TSNs skipped; we do NOT move back. */
3913 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3914 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3916 (uint32_t)(uintptr_t)tp1->whoTo,
3920 /* First setup this by shrinking flight */
3921 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3922 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3925 sctp_flight_size_decrease(tp1);
3926 sctp_total_flight_decrease(stcb, tp1);
3927 /* Now mark for resend */
3928 tp1->sent = SCTP_DATAGRAM_RESEND;
3929 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3931 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3932 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3933 tp1->whoTo->flight_size,
3935 (uint32_t)(uintptr_t)tp1->whoTo,
3941 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3942 uint32_t rwnd, int *abort_now, int ecne_seen)
3944 struct sctp_nets *net;
3945 struct sctp_association *asoc;
3946 struct sctp_tmit_chunk *tp1, *tp2;
3948 int win_probe_recovery = 0;
3949 int win_probe_recovered = 0;
3950 int j, done_once = 0;
3954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3955 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3956 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3958 SCTP_TCB_LOCK_ASSERT(stcb);
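/*
 * The "express" SACK path: judging by its arguments, this handles
 * SACKs that carry only a new cumulative ack and an rwnd update (no
 * gap reports to walk), so the sent queue can be drained in one
 * pass.
 */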
3959 #ifdef SCTP_ASOCLOG_OF_TSNS
3960 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3961 stcb->asoc.cumack_log_at++;
3962 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3963 stcb->asoc.cumack_log_at = 0;
3967 old_rwnd = asoc->peers_rwnd;
3968 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3971 } else if (asoc->last_acked_seq == cumack) {
3972 /* Window update sack */
3973 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3974 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
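/*
 * The usable peer rwnd is the advertised rwnd minus everything we
 * still count as in flight, charging sctp_peer_chunk_oh bytes of
 * per-chunk overhead on top of the data itself.
 */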
3975 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3976 /* SWS sender side engages */
3977 asoc->peers_rwnd = 0;
3979 if (asoc->peers_rwnd > old_rwnd) {
3985 /* First setup for CC stuff */
3986 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3987 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3988 /* Drag along the window_tsn for cwr's */
3989 net->cwr_window_tsn = cumack;
3991 net->prev_cwnd = net->cwnd;
3996 * CMT: Reset CUC and Fast recovery algo variables before
3999 net->new_pseudo_cumack = 0;
4000 net->will_exit_fast_recovery = 0;
4001 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4002 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4005 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4006 tp1 = TAILQ_LAST(&asoc->sent_queue,
4007 sctpchunk_listhead);
4008 send_s = tp1->rec.data.tsn + 1;
4010 send_s = asoc->sending_seq;
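/*
 * send_s is one past the highest TSN we could have put on the wire,
 * so a valid cumulative ack must be strictly smaller than it;
 * anything else acknowledges data we never sent.
 */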
4012 if (SCTP_TSN_GE(cumack, send_s)) {
4013 struct mbuf *op_err;
4014 char msg[SCTP_DIAG_INFO_LEN];
4018 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4020 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4021 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4022 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4025 asoc->this_sack_highest_gap = cumack;
4026 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4027 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4028 stcb->asoc.overall_error_count,
4030 SCTP_FROM_SCTP_INDATA,
4033 stcb->asoc.overall_error_count = 0;
4034 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4035 /* process the new consecutive TSN first */
4036 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4037 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4038 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4039 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4041 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4043 * If it is less than ACKED, it is
4044 * now no-longer in flight. Higher
4045 * values may occur during marking
4047 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4049 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4050 tp1->whoTo->flight_size,
4052 (uint32_t)(uintptr_t)tp1->whoTo,
4055 sctp_flight_size_decrease(tp1);
4056 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4057 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4060 /* sa_ignore NO_NULL_CHK */
4061 sctp_total_flight_decrease(stcb, tp1);
4063 tp1->whoTo->net_ack += tp1->send_size;
4064 if (tp1->snd_count < 2) {
* True non-retransmitted
4069 tp1->whoTo->net_ack2 +=
4072 /* update RTO too? */
4080 sctp_calculate_rto(stcb,
4082 &tp1->sent_rcv_time,
4083 SCTP_RTT_FROM_DATA);
4086 if (tp1->whoTo->rto_needed == 0) {
4087 tp1->whoTo->rto_needed = 1;
4093 * CMT: CUCv2 algorithm. From the
4094 * cumack'd TSNs, for each TSN being
4095 * acked for the first time, set the
4096 * following variables for the
4097 * corresp destination.
4098 * new_pseudo_cumack will trigger a
4100 * find_(rtx_)pseudo_cumack will
4101 * trigger search for the next
4102 * expected (rtx-)pseudo-cumack.
4104 tp1->whoTo->new_pseudo_cumack = 1;
4105 tp1->whoTo->find_pseudo_cumack = 1;
4106 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4108 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4109 /* sa_ignore NO_NULL_CHK */
4110 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4113 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4114 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4116 if (tp1->rec.data.chunk_was_revoked) {
4117 /* deflate the cwnd */
4118 tp1->whoTo->cwnd -= tp1->book_size;
4119 tp1->rec.data.chunk_was_revoked = 0;
4121 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4122 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4123 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4126 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4130 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4131 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4132 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4133 asoc->trigger_reset = 1;
4135 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4137 /* sa_ignore NO_NULL_CHK */
4138 sctp_free_bufspace(stcb, asoc, tp1, 1);
4139 sctp_m_freem(tp1->data);
4142 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4143 sctp_log_sack(asoc->last_acked_seq,
4148 SCTP_LOG_FREE_SENT);
4150 asoc->sent_queue_cnt--;
4151 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4158 /* sa_ignore NO_NULL_CHK */
4159 if (stcb->sctp_socket) {
4160 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4164 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4165 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4166 /* sa_ignore NO_NULL_CHK */
4167 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4169 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4170 so = SCTP_INP_SO(stcb->sctp_ep);
4171 atomic_add_int(&stcb->asoc.refcnt, 1);
4172 SCTP_TCB_UNLOCK(stcb);
4173 SCTP_SOCKET_LOCK(so, 1);
4174 SCTP_TCB_LOCK(stcb);
4175 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4176 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4177 /* assoc was freed while we were unlocked */
4178 SCTP_SOCKET_UNLOCK(so, 1);
4182 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4183 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4184 SCTP_SOCKET_UNLOCK(so, 1);
4187 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4188 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4192 /* JRS - Use the congestion control given in the CC module */
4193 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4194 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4195 if (net->net_ack2 > 0) {
4196 /*
4197 * Karn's rule applies to clearing error
4198 * count; this is optional.
4199 */
4200 net->error_count = 0;
4201 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4202 /* addr came good */
4203 net->dest_state |= SCTP_ADDR_REACHABLE;
4204 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4205 0, (void *)net, SCTP_SO_NOT_LOCKED);
4207 if (net == stcb->asoc.primary_destination) {
4208 if (stcb->asoc.alternate) {
4209 /*
4210 * release the alternate,
4211 * primary destination is good.
4212 */
4213 sctp_free_remote_addr(stcb->asoc.alternate);
4214 stcb->asoc.alternate = NULL;
4217 if (net->dest_state & SCTP_ADDR_PF) {
4218 net->dest_state &= ~SCTP_ADDR_PF;
4219 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4220 stcb->sctp_ep, stcb, net,
4221 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4222 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4223 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4224 /* Done with this net */
4227 /* restore any doubled timers */
4228 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4229 if (net->RTO < stcb->asoc.minrto) {
4230 net->RTO = stcb->asoc.minrto;
4232 if (net->RTO > stcb->asoc.maxrto) {
4233 net->RTO = stcb->asoc.maxrto;
4237 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4239 asoc->last_acked_seq = cumack;
4241 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4242 /* nothing left in-flight */
4243 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4244 net->flight_size = 0;
4245 net->partial_bytes_acked = 0;
4247 asoc->total_flight = 0;
4248 asoc->total_flight_count = 0;
4252 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4253 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4254 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4255 /* SWS sender side engages */
4256 asoc->peers_rwnd = 0;
4258 if (asoc->peers_rwnd > old_rwnd) {
4259 win_probe_recovery = 1;
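/*
 * Illustrative sketch only (not part of the original source): the rwnd
 * bookkeeping above in isolation, assuming sctp_sbspace_sub() is a
 * floor-at-zero subtraction. The peer's advertised window is deflated
 * by the bytes still in flight plus a per-chunk overhead (the
 * sctp_peer_chunk_oh sysctl), then clamped to zero below the
 * sender-side SWS threshold so a tiny window is treated as closed.
 */
#if 0
{
uint32_t in_flight, rwnd_example;

in_flight = (uint32_t)(asoc->total_flight +
(asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
rwnd_example = (rwnd > in_flight) ? (rwnd - in_flight) : 0;
if (rwnd_example < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
/* SWS avoidance: the sender treats the window as closed */
rwnd_example = 0;
}
}
#endif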
4261 /* Now assure a timer is running where data is queued */
4264 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4265 if (win_probe_recovery && (net->window_probe)) {
4266 win_probe_recovered = 1;
4267 /*
4268 * Find first chunk that was used with window probe
4269 * and clear the sent flag.
4270 */
4271 /* sa_ignore FREED_MEMORY */
4272 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4273 if (tp1->window_probe) {
4274 /* move back to data send queue */
4275 sctp_window_probe_recovery(stcb, asoc, tp1);
4280 if (net->flight_size) {
4282 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4283 if (net->window_probe) {
4284 net->window_probe = 0;
4287 if (net->window_probe) {
4288 /*
4289 * In window probes we must assure a timer
4290 * is still running there.
4291 */
4292 net->window_probe = 0;
4293 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4294 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4296 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4297 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4299 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4304 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4305 (asoc->sent_queue_retran_cnt == 0) &&
4306 (win_probe_recovered == 0) &&
4308 /*
4309 * huh, this should not happen unless all packets are
4310 * PR-SCTP and marked to skip, of course.
4311 */
4312 if (sctp_fs_audit(asoc)) {
4313 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4314 net->flight_size = 0;
4316 asoc->total_flight = 0;
4317 asoc->total_flight_count = 0;
4318 asoc->sent_queue_retran_cnt = 0;
4319 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4320 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4321 sctp_flight_size_increase(tp1);
4322 sctp_total_flight_increase(stcb, tp1);
4323 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4324 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4331 /**********************************/
4332 /* Now what about shutdown issues */
4333 /**********************************/
4334 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4335 /* nothing left on sendqueue ... consider done */
4337 if ((asoc->stream_queue_cnt == 1) &&
4338 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4339 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4340 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4341 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4343 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4344 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4345 (asoc->stream_queue_cnt == 1) &&
4346 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4347 struct mbuf *op_err;
4351 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4352 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4353 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4356 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4357 (asoc->stream_queue_cnt == 0)) {
4358 struct sctp_nets *netp;
4360 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4361 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4362 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4364 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4365 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
4366 sctp_stop_timers_for_shutdown(stcb);
4367 if (asoc->alternate) {
4368 netp = asoc->alternate;
4370 netp = asoc->primary_destination;
4372 sctp_send_shutdown(stcb, netp);
4373 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4374 stcb->sctp_ep, stcb, netp);
4375 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4376 stcb->sctp_ep, stcb, netp);
4377 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4378 (asoc->stream_queue_cnt == 0)) {
4379 struct sctp_nets *netp;
4381 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4382 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4383 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
4384 sctp_stop_timers_for_shutdown(stcb);
4385 if (asoc->alternate) {
4386 netp = asoc->alternate;
4388 netp = asoc->primary_destination;
4390 sctp_send_shutdown_ack(stcb, netp);
4391 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4392 stcb->sctp_ep, stcb, netp);
4395 /*********************************************/
4396 /* Here we perform PR-SCTP procedures */
4398 /*********************************************/
4399 /* C1. update advancedPeerAckPoint */
4400 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4401 asoc->advanced_peer_ack_point = cumack;
4403 /* PR-SCTP issues need to be addressed too */
4404 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4405 struct sctp_tmit_chunk *lchk;
4406 uint32_t old_adv_peer_ack_point;
4408 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4409 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4410 /* C3. See if we need to send a Fwd-TSN */
4411 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4412 /*
4413 * ISSUE with ECN, see FWD-TSN processing.
4414 */
4415 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4416 send_forward_tsn(stcb, asoc);
4418 /* try to FR fwd-tsn's that get lost too */
4419 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4420 send_forward_tsn(stcb, asoc);
4425 /* Assure a timer is up */
4426 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4427 stcb->sctp_ep, stcb, lchk->whoTo);
4430 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4431 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4433 stcb->asoc.peers_rwnd,
4434 stcb->asoc.total_flight,
4435 stcb->asoc.total_output_queue_size);
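/*
 * Illustrative sketch only (not part of the original source): the
 * SCTP_TSN_GT()/SCTP_TSN_GE() comparisons used throughout both SACK
 * handlers are serial-number comparisons modulo 2^32, so ordering
 * stays correct across TSN wraparound. A minimal stand-in, assuming
 * the real macros (defined elsewhere in the stack) are equivalent;
 * e.g. example_tsn_gt(0x00000001, 0xfffffffe) is true even though the
 * plain integer comparison would say otherwise.
 */
#if 0
static int
example_tsn_gt(uint32_t a, uint32_t b)
{
/* true iff a is "after" b in serial-number arithmetic */
return (((a < b) && ((b - a) > (1U << 31))) ||
((a > b) && ((a - b) < (1U << 31))));
}
#endif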
4440 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4441 struct sctp_tcb *stcb,
4442 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4443 int *abort_now, uint8_t flags,
4444 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4446 struct sctp_association *asoc;
4447 struct sctp_tmit_chunk *tp1, *tp2;
4448 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4449 uint16_t wake_him = 0;
4450 uint32_t send_s = 0;
4452 int accum_moved = 0;
4453 int will_exit_fast_recovery = 0;
4454 uint32_t a_rwnd, old_rwnd;
4455 int win_probe_recovery = 0;
4456 int win_probe_recovered = 0;
4457 struct sctp_nets *net = NULL;
4460 uint8_t reneged_all = 0;
4461 uint8_t cmt_dac_flag;
4463 /*
4464 * we take any chance we can to service our queues since we cannot
4465 * get awoken when the socket is read from :<
4466 */
4467 /*
4468 * Now perform the actual SACK handling: 1) Verify that it is not an
4469 * old sack, if so discard. 2) If there is nothing left in the send
4470 * queue (cum-ack is equal to last acked) then you have a duplicate
4471 * too; update any rwnd change and verify no timers are running,
4472 * then return. 3) Process any new consecutive data, i.e. cum-ack
4473 * moved; process these first and note that it moved. 4) Process any
4474 * sack blocks. 5) Drop any acked chunks from the queue. 6) Check for
4475 * any revoked blocks and mark them. 7) Update the cwnd. 8) Nothing
4476 * left: sync up flightsizes and things, stop all timers and also
4477 * check for shutdown_pending state; if so then go ahead and send off
4478 * the shutdown. If in shutdown recv, send off the shutdown-ack and
4479 * start that timer, then return. 9) Strike any non-acked things and
4480 * do the FR procedure if needed, being sure to set the FR flag.
4481 * 10) Do pr-sctp procedures. 11) Apply any FR penalties. 12) Assure
4482 * we will SACK if in shutdown_recv state.
4483 */
4484 SCTP_TCB_LOCK_ASSERT(stcb);
4486 this_sack_lowest_newack = 0;
4487 SCTP_STAT_INCR(sctps_slowpath_sack);
4489 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4490 #ifdef SCTP_ASOCLOG_OF_TSNS
4491 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4492 stcb->asoc.cumack_log_at++;
4493 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4494 stcb->asoc.cumack_log_at = 0;
4499 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4500 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4501 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4504 old_rwnd = stcb->asoc.peers_rwnd;
4505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4506 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4507 stcb->asoc.overall_error_count,
4509 SCTP_FROM_SCTP_INDATA,
4512 stcb->asoc.overall_error_count = 0;
4514 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4515 sctp_log_sack(asoc->last_acked_seq,
4522 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4524 uint32_t *dupdata, dblock;
4526 for (i = 0; i < num_dup; i++) {
4527 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4528 sizeof(uint32_t), (uint8_t *)&dblock);
4529 if (dupdata == NULL) {
4532 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4536 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4537 tp1 = TAILQ_LAST(&asoc->sent_queue,
4538 sctpchunk_listhead);
4539 send_s = tp1->rec.data.tsn + 1;
4542 send_s = asoc->sending_seq;
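/*
 * Illustrative sketch only: send_s is the first TSN we have never
 * sent, i.e. one past the last chunk on the sent_queue, or
 * sending_seq when that queue is empty. A cum_ack at or beyond send_s
 * acks data that was never transmitted; e.g. with sending_seq == 100
 * and an empty sent_queue, cum_ack == 100 must take the
 * protocol-violation abort below, while 99 is acceptable.
 */
#if 0
if (SCTP_TSN_GE(cum_ack, send_s)) {
/* would fall into the abort path handled below */
}
#endif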
4544 if (SCTP_TSN_GE(cum_ack, send_s)) {
4545 struct mbuf *op_err;
4546 char msg[SCTP_DIAG_INFO_LEN];
4548 /*
4549 * no way, we have not even sent this TSN out yet. Peer is
4550 * hopelessly messed up with us.
4551 */
4552 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4555 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4556 tp1->rec.data.tsn, (void *)tp1);
4561 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4563 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4564 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4565 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4568 /**********************/
4569 /* 1) check the range */
4570 /**********************/
4571 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4572 /* acking something behind */
4576 /* update the Rwnd of the peer */
4577 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4578 TAILQ_EMPTY(&asoc->send_queue) &&
4579 (asoc->stream_queue_cnt == 0)) {
4580 /* nothing left on send/sent and strmq */
4581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4582 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4583 asoc->peers_rwnd, 0, 0, a_rwnd);
4585 asoc->peers_rwnd = a_rwnd;
4586 if (asoc->sent_queue_retran_cnt) {
4587 asoc->sent_queue_retran_cnt = 0;
4589 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4590 /* SWS sender side engages */
4591 asoc->peers_rwnd = 0;
4593 /* stop any timers */
4594 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4595 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4596 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4597 net->partial_bytes_acked = 0;
4598 net->flight_size = 0;
4600 asoc->total_flight = 0;
4601 asoc->total_flight_count = 0;
4604 /*
4605 * We init net_ack and net_ack2 to 0. These are used to track two
4606 * things: the total byte count acked is tracked in net_ack, and
4607 * net_ack2 tracks the total bytes acked that are unambiguous and
4608 * were never retransmitted. We track these on a per destination
4609 * address basis.
4610 */
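/*
 * Illustrative sketch only: the per-destination accounting the comment
 * above describes, reduced to a single chunk. net_ack takes every
 * newly acked byte; net_ack2 only bytes of chunks sent exactly once
 * (snd_count < 2), which is why RTT sampling and the Karn's-rule
 * error clearing later in this function key off net_ack2.
 */
#if 0
{
struct sctp_tmit_chunk *ex_chk;

ex_chk = TAILQ_FIRST(&asoc->sent_queue);
if ((ex_chk != NULL) && (ex_chk->sent < SCTP_DATAGRAM_ACKED)) {
ex_chk->whoTo->net_ack += ex_chk->send_size;
if (ex_chk->snd_count < 2) {
/* unambiguous: this chunk was never retransmitted */
ex_chk->whoTo->net_ack2 += ex_chk->send_size;
}
}
}
#endif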
4611 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4613 /* Drag along the window_tsn for cwr's */
4614 net->cwr_window_tsn = cum_ack;
4616 net->prev_cwnd = net->cwnd;
4620 /*
4621 * CMT: Reset CUC and Fast recovery algo variables before
4622 * SACK processing.
4623 */
4624 net->new_pseudo_cumack = 0;
4625 net->will_exit_fast_recovery = 0;
4626 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4627 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4630 /*
4631 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4632 * to be greater than the cumack. Also reset saw_newack to 0
4633 * for all destinations.
4634 */
4635 net->saw_newack = 0;
4636 net->this_sack_highest_newack = last_tsn;
4638 /* process the new consecutive TSN first */
4639 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4640 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4641 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4643 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4644 /*
4645 * If it is less than ACKED, it is
4646 * now no longer in flight. Higher
4647 * values may occur during marking.
4648 */
4649 if ((tp1->whoTo->dest_state &
4650 SCTP_ADDR_UNCONFIRMED) &&
4651 (tp1->snd_count < 2)) {
4652 /*
4653 * If there was no retran
4654 * and the address is
4655 * un-confirmed and we sent
4656 * there and are now
4657 * sacked... it's confirmed,
4658 * mark it so.
4659 */
4660 tp1->whoTo->dest_state &=
4661 ~SCTP_ADDR_UNCONFIRMED;
4663 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4664 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4665 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4666 tp1->whoTo->flight_size,
4668 (uint32_t)(uintptr_t)tp1->whoTo,
4671 sctp_flight_size_decrease(tp1);
4672 sctp_total_flight_decrease(stcb, tp1);
4673 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4674 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4678 tp1->whoTo->net_ack += tp1->send_size;
4680 /* CMT SFR and DAC algos */
4681 this_sack_lowest_newack = tp1->rec.data.tsn;
4682 tp1->whoTo->saw_newack = 1;
4684 if (tp1->snd_count < 2) {
4685 /*
4686 * True non-retransmitted
4687 * chunk
4688 */
4689 tp1->whoTo->net_ack2 +=
4692 /* update RTO too? */
4696 sctp_calculate_rto(stcb,
4698 &tp1->sent_rcv_time,
4699 SCTP_RTT_FROM_DATA);
4702 if (tp1->whoTo->rto_needed == 0) {
4703 tp1->whoTo->rto_needed = 1;
4708 /*
4709 * CMT: CUCv2 algorithm. From the
4710 * cumack'd TSNs, for each TSN being
4711 * acked for the first time, set the
4712 * following variables for the
4713 * corresponding destination.
4714 * new_pseudo_cumack will trigger a
4715 * cwnd update.
4716 * find_(rtx_)pseudo_cumack will
4717 * trigger search for the next
4718 * expected (rtx-)pseudo-cumack.
4719 */
4720 tp1->whoTo->new_pseudo_cumack = 1;
4721 tp1->whoTo->find_pseudo_cumack = 1;
4722 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4725 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4726 sctp_log_sack(asoc->last_acked_seq,
4731 SCTP_LOG_TSN_ACKED);
4733 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4734 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4737 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4738 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4739 #ifdef SCTP_AUDITING_ENABLED
4740 sctp_audit_log(0xB3,
4741 (asoc->sent_queue_retran_cnt & 0x000000ff));
4744 if (tp1->rec.data.chunk_was_revoked) {
4745 /* deflate the cwnd */
4746 tp1->whoTo->cwnd -= tp1->book_size;
4747 tp1->rec.data.chunk_was_revoked = 0;
4749 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4750 tp1->sent = SCTP_DATAGRAM_ACKED;
4757 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4758 /* always set this up to cum-ack */
4759 asoc->this_sack_highest_gap = last_tsn;
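/*
 * Illustrative sketch only (offsets as assumed from the SACK wire
 * format handled by sctp_handle_segments() below): each gap-ack block
 * reports a run of TSNs received beyond the cumulative ack as 16-bit
 * offsets, so a SACK with cum_ack 10 and a block {start = 3, end = 5}
 * says TSNs 13 through 15 arrived.
 */
#if 0
{
uint16_t ex_start = 3, ex_end = 5;
uint32_t ex_first, ex_last;

ex_first = last_tsn + ex_start;	/* 13 when last_tsn == 10 */
ex_last = last_tsn + ex_end;	/* 15 */
}
#endif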
4761 if ((num_seg > 0) || (num_nr_seg > 0)) {
4763 /*
4764 * thisSackHighestGap will increase while handling NEW
4765 * segments; this_sack_highest_newack will increase while
4766 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4767 * used for CMT DAC algo. saw_newack will also change.
4768 */
4769 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4770 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4771 num_seg, num_nr_seg, &rto_ok)) {
4774 /*
4775 * validate the biggest_tsn_acked in the gap acks if strict
4776 * adherence is wanted.
4777 */
4778 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4779 /*
4780 * peer is either confused or we are under attack.
4781 */
4783 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4784 biggest_tsn_acked, send_s);
4788 /*******************************************/
4789 /* cancel ALL T3-send timer if accum moved */
4790 /*******************************************/
4791 if (asoc->sctp_cmt_on_off > 0) {
4792 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4793 if (net->new_pseudo_cumack)
4794 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4796 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4801 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4802 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4803 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4807 /********************************************/
4808 /* drop the acked chunks from the sentqueue */
4809 /********************************************/
4810 asoc->last_acked_seq = cum_ack;
4812 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4813 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4816 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4817 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4818 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4821 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4825 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4826 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4827 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4828 asoc->trigger_reset = 1;
4830 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4831 if (PR_SCTP_ENABLED(tp1->flags)) {
4832 if (asoc->pr_sctp_cnt != 0)
4833 asoc->pr_sctp_cnt--;
4835 asoc->sent_queue_cnt--;
4837 /* sa_ignore NO_NULL_CHK */
4838 sctp_free_bufspace(stcb, asoc, tp1, 1);
4839 sctp_m_freem(tp1->data);
4841 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4842 asoc->sent_queue_cnt_removeable--;
4845 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4846 sctp_log_sack(asoc->last_acked_seq,
4851 SCTP_LOG_FREE_SENT);
4853 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4856 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4858 panic("Warning flight size is positive and should be 0");
4860 SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
4861 asoc->total_flight);
4863 asoc->total_flight = 0;
4866 /* sa_ignore NO_NULL_CHK */
4867 if ((wake_him) && (stcb->sctp_socket)) {
4868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4872 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4873 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4874 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4876 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4877 so = SCTP_INP_SO(stcb->sctp_ep);
4878 atomic_add_int(&stcb->asoc.refcnt, 1);
4879 SCTP_TCB_UNLOCK(stcb);
4880 SCTP_SOCKET_LOCK(so, 1);
4881 SCTP_TCB_LOCK(stcb);
4882 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4883 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4884 /* assoc was freed while we were unlocked */
4885 SCTP_SOCKET_UNLOCK(so, 1);
4889 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4890 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4891 SCTP_SOCKET_UNLOCK(so, 1);
4894 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4895 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4899 if (asoc->fast_retran_loss_recovery && accum_moved) {
4900 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4901 /* Setup so we will exit RFC2582 fast recovery */
4902 will_exit_fast_recovery = 1;
4905 /*
4906 * Check for revoked fragments:
4907 *
4908 * if Previous sack - Had no frags, then we can't have any revoked.
4909 * if Previous sack - Had frags, then - if we now have frags, aka
4910 * num_seg > 0, call sctp_check_for_revoked() to tell if the peer
4911 * revoked some of them; else - the peer revoked all ACKED
4912 * fragments, since we had some before and now we have NONE.
4913 */
4916 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4917 asoc->saw_sack_with_frags = 1;
4918 } else if (asoc->saw_sack_with_frags) {
4919 int cnt_revoked = 0;
4921 /* Peer revoked all dg's marked or acked */
4922 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4923 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4924 tp1->sent = SCTP_DATAGRAM_SENT;
4925 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4926 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4927 tp1->whoTo->flight_size,
4929 (uint32_t)(uintptr_t)tp1->whoTo,
4932 sctp_flight_size_increase(tp1);
4933 sctp_total_flight_increase(stcb, tp1);
4934 tp1->rec.data.chunk_was_revoked = 1;
4936 * To ensure that this increase in
4937 * flightsize, which is artificial, does not
4938 * throttle the sender, we also increase the
4939 * cwnd artificially.
4941 tp1->whoTo->cwnd += tp1->book_size;
4948 asoc->saw_sack_with_frags = 0;
4951 asoc->saw_sack_with_nr_frags = 1;
4953 asoc->saw_sack_with_nr_frags = 0;
4955 /* JRS - Use the congestion control given in the CC module */
4956 if (ecne_seen == 0) {
4957 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4958 if (net->net_ack2 > 0) {
4959 /*
4960 * Karn's rule applies to clearing error
4961 * count; this is optional.
4962 */
4963 net->error_count = 0;
4964 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4965 /* addr came good */
4966 net->dest_state |= SCTP_ADDR_REACHABLE;
4967 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4968 0, (void *)net, SCTP_SO_NOT_LOCKED);
4971 if (net == stcb->asoc.primary_destination) {
4972 if (stcb->asoc.alternate) {
4973 /*
4974 * release the alternate,
4975 * primary destination is good.
4976 */
4977 sctp_free_remote_addr(stcb->asoc.alternate);
4978 stcb->asoc.alternate = NULL;
4982 if (net->dest_state & SCTP_ADDR_PF) {
4983 net->dest_state &= ~SCTP_ADDR_PF;
4984 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4985 stcb->sctp_ep, stcb, net,
4986 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4987 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4988 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4989 /* Done with this net */
4992 /* restore any doubled timers */
4993 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4994 if (net->RTO < stcb->asoc.minrto) {
4995 net->RTO = stcb->asoc.minrto;
4997 if (net->RTO > stcb->asoc.maxrto) {
4998 net->RTO = stcb->asoc.maxrto;
5002 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5005 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5006 /* nothing left in-flight */
5007 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5008 /* stop all timers */
5009 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5011 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5012 net->flight_size = 0;
5013 net->partial_bytes_acked = 0;
5015 asoc->total_flight = 0;
5016 asoc->total_flight_count = 0;
5019 /**********************************/
5020 /* Now what about shutdown issues */
5021 /**********************************/
5022 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5023 /* nothing left on sendqueue ... consider done */
5024 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5025 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5026 asoc->peers_rwnd, 0, 0, a_rwnd);
5028 asoc->peers_rwnd = a_rwnd;
5029 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5030 /* SWS sender side engages */
5031 asoc->peers_rwnd = 0;
5034 if ((asoc->stream_queue_cnt == 1) &&
5035 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5036 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5037 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5038 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5040 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5041 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5042 (asoc->stream_queue_cnt == 1) &&
5043 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5044 struct mbuf *op_err;
5048 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5049 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5050 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5053 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5054 (asoc->stream_queue_cnt == 0)) {
5055 struct sctp_nets *netp;
5057 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5058 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5059 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5061 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5062 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
5063 sctp_stop_timers_for_shutdown(stcb);
5064 if (asoc->alternate) {
5065 netp = asoc->alternate;
5067 netp = asoc->primary_destination;
5069 sctp_send_shutdown(stcb, netp);
5070 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5071 stcb->sctp_ep, stcb, netp);
5072 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5073 stcb->sctp_ep, stcb, netp);
5075 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5076 (asoc->stream_queue_cnt == 0)) {
5077 struct sctp_nets *netp;
5079 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5080 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5081 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
5082 sctp_stop_timers_for_shutdown(stcb);
5083 if (asoc->alternate) {
5084 netp = asoc->alternate;
5086 netp = asoc->primary_destination;
5088 sctp_send_shutdown_ack(stcb, netp);
5089 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5090 stcb->sctp_ep, stcb, netp);
5094 /*
5095 * Now here we are going to recycle net_ack for a different use...
5096 */
5098 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5102 /*
5103 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5104 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5105 * automatically ensure that.
5106 */
5107 if ((asoc->sctp_cmt_on_off > 0) &&
5108 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5109 (cmt_dac_flag == 0)) {
5110 this_sack_lowest_newack = cum_ack;
5112 if ((num_seg > 0) || (num_nr_seg > 0)) {
5113 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5114 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5116 /* JRS - Use the congestion control given in the CC module */
5117 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5119 /* Now are we exiting loss recovery? */
5120 if (will_exit_fast_recovery) {
5121 /* Ok, we must exit fast recovery */
5122 asoc->fast_retran_loss_recovery = 0;
5124 if ((asoc->sat_t3_loss_recovery) &&
5125 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5126 /* end satellite t3 loss recovery */
5127 asoc->sat_t3_loss_recovery = 0;
5132 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5133 if (net->will_exit_fast_recovery) {
5134 /* Ok, we must exit fast recovery */
5135 net->fast_retran_loss_recovery = 0;
5139 /* Adjust and set the new rwnd value */
5140 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5141 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5142 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5144 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5145 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5146 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5147 /* SWS sender side engages */
5148 asoc->peers_rwnd = 0;
5150 if (asoc->peers_rwnd > old_rwnd) {
5151 win_probe_recovery = 1;
5154 /*
5155 * Now we must setup so we have a timer up for anyone with
5156 * outstanding data.
5157 */
5161 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5162 if (win_probe_recovery && (net->window_probe)) {
5163 win_probe_recovered = 1;
5164 /*
5165 * Find first chunk that was used with
5166 * window probe and clear the event. Put
5167 * it back into the send queue as if it has
5168 * not been sent.
5169 */
5170 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5171 if (tp1->window_probe) {
5172 sctp_window_probe_recovery(stcb, asoc, tp1);
5177 if (net->flight_size) {
5179 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5180 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5181 stcb->sctp_ep, stcb, net);
5183 if (net->window_probe) {
5184 net->window_probe = 0;
5187 if (net->window_probe) {
5188 /*
5189 * In window probes we must assure a timer
5190 * is still running there.
5191 */
5192 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5193 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5194 stcb->sctp_ep, stcb, net);
5197 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5198 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5200 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5205 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5206 (asoc->sent_queue_retran_cnt == 0) &&
5207 (win_probe_recovered == 0) &&
5209 /*
5210 * huh, this should not happen unless all packets are
5211 * PR-SCTP and marked to skip, of course.
5212 */
5213 if (sctp_fs_audit(asoc)) {
5214 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5215 net->flight_size = 0;
5217 asoc->total_flight = 0;
5218 asoc->total_flight_count = 0;
5219 asoc->sent_queue_retran_cnt = 0;
5220 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5221 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5222 sctp_flight_size_increase(tp1);
5223 sctp_total_flight_increase(stcb, tp1);
5224 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5225 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5232 /*********************************************/
5233 /* Here we perform PR-SCTP procedures */
5235 /*********************************************/
5236 /* C1. update advancedPeerAckPoint */
5237 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5238 asoc->advanced_peer_ack_point = cum_ack;
5240 /* C2. try to further move advancedPeerAckPoint ahead */
5241 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5242 struct sctp_tmit_chunk *lchk;
5243 uint32_t old_adv_peer_ack_point;
5245 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5246 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5247 /* C3. See if we need to send a Fwd-TSN */
5248 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5249 /*
5250 * ISSUE with ECN, see FWD-TSN processing.
5251 */
5252 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5253 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5254 0xee, cum_ack, asoc->advanced_peer_ack_point,
5255 old_adv_peer_ack_point);
5257 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5258 send_forward_tsn(stcb, asoc);
5260 /* try to FR fwd-tsn's that get lost too */
5261 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5262 send_forward_tsn(stcb, asoc);
5267 /* Assure a timer is up */
5268 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5269 stcb->sctp_ep, stcb, lchk->whoTo);
5272 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5273 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5275 stcb->asoc.peers_rwnd,
5276 stcb->asoc.total_flight,
5277 stcb->asoc.total_output_queue_size);
5282 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5285 uint32_t cum_ack, a_rwnd;
5287 cum_ack = ntohl(cp->cumulative_tsn_ack);
5288 /* Arrange so a_rwnd does NOT change */
5289 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5291 /* Now call the express sack handling */
5292 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
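/*
 * Illustrative sketch only: why sctp_update_acked() passes
 * peers_rwnd + total_flight as a_rwnd. The express SACK path deflates
 * a_rwnd by the bytes in flight when recomputing peers_rwnd, so
 * feeding it the pre-inflated value leaves the window essentially
 * unchanged (modulo the per-chunk overhead term), as a SHUTDOWN chunk
 * carries a cumulative ack but no window update of its own.
 */
#if 0
static uint32_t
example_shutdown_a_rwnd(uint32_t peers_rwnd, uint32_t total_flight)
{
/* handler later subtracts the flight, yielding peers_rwnd back */
return (peers_rwnd + total_flight);
}
#endif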
5296 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5297 struct sctp_stream_in *strmin)
5299 struct sctp_queued_to_read *control, *ncontrol;
5300 struct sctp_association *asoc;
5302 int need_reasm_check = 0;
5305 mid = strmin->last_mid_delivered;
5306 /*
5307 * First deliver anything prior to and including the stream no that
5308 * is partially delivered.
5309 */
5310 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5311 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5312 /* this is deliverable now */
5313 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5314 if (control->on_strm_q) {
5315 if (control->on_strm_q == SCTP_ON_ORDERED) {
5316 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5317 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5318 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5321 panic("strmin: %p ctl: %p unknown %d",
5322 strmin, control, control->on_strm_q);
5325 control->on_strm_q = 0;
5327 /* subtract pending on streams */
5328 if (asoc->size_on_all_streams >= control->length) {
5329 asoc->size_on_all_streams -= control->length;
5332 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5334 asoc->size_on_all_streams = 0;
5337 sctp_ucount_decr(asoc->cnt_on_all_streams);
5338 /* deliver it to at least the delivery-q */
5339 if (stcb->sctp_socket) {
5340 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5341 sctp_add_to_readq(stcb->sctp_ep, stcb,
5343 &stcb->sctp_socket->so_rcv,
5344 1, SCTP_READ_LOCK_HELD,
5345 SCTP_SO_NOT_LOCKED);
5348 /* It's a fragmented message */
5349 if (control->first_frag_seen) {
5350 /*
5351 * Make it so this is next to
5352 * deliver; we restore later.
5353 */
5354 strmin->last_mid_delivered = control->mid - 1;
5355 need_reasm_check = 1;
5360 /* no more delivery now. */
5364 if (need_reasm_check) {
5367 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5368 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5369 /* Restore the next to deliver unless we are ahead */
5370 strmin->last_mid_delivered = mid;
5373 /* Left the front Partial one on */
5376 need_reasm_check = 0;
5378 /*
5379 * now we must deliver things in queue the normal way if any are
5380 * here.
5381 */
5382 mid = strmin->last_mid_delivered + 1;
5383 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5384 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5385 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5386 /* this is deliverable now */
5387 if (control->on_strm_q) {
5388 if (control->on_strm_q == SCTP_ON_ORDERED) {
5389 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5390 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5391 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5394 panic("strmin: %p ctl: %p unknown %d",
5395 strmin, control, control->on_strm_q);
5398 control->on_strm_q = 0;
5400 /* subtract pending on streams */
5401 if (asoc->size_on_all_streams >= control->length) {
5402 asoc->size_on_all_streams -= control->length;
5405 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5407 asoc->size_on_all_streams = 0;
5410 sctp_ucount_decr(asoc->cnt_on_all_streams);
5411 /* deliver it to at least the delivery-q */
5412 strmin->last_mid_delivered = control->mid;
5413 if (stcb->sctp_socket) {
5414 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5415 sctp_add_to_readq(stcb->sctp_ep, stcb,
5417 &stcb->sctp_socket->so_rcv, 1,
5418 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5421 mid = strmin->last_mid_delivered + 1;
5423 /* It's a fragmented message */
5424 if (control->first_frag_seen) {
5425 /*
5426 * Make it so this is next to
5427 * deliver.
5428 */
5429 strmin->last_mid_delivered = control->mid - 1;
5430 need_reasm_check = 1;
5438 if (need_reasm_check) {
5439 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
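/*
 * Illustrative sketch only (assumption about macros defined elsewhere
 * in the stack): the SCTP_MID_EQ/GT/GE comparisons used above are
 * serial-number comparisons whose width follows the association:
 * 32-bit message IDs when I-DATA is supported, 16-bit SSNs otherwise.
 * A minimal stand-in:
 */
#if 0
static int
example_mid_ge(int idata_supported, uint32_t a, uint32_t b)
{
if (idata_supported) {
return ((int32_t)(a - b) >= 0);
} else {
return ((int16_t)((uint16_t)(a - b)) >= 0);
}
}
#endif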
5446 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5447 struct sctp_association *asoc,
5448 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5450 struct sctp_queued_to_read *control;
5451 struct sctp_stream_in *strm;
5452 struct sctp_tmit_chunk *chk, *nchk;
5453 int cnt_removed = 0;
5455 /*
5456 * For now large messages held on the stream reasm that are complete
5457 * will be tossed too. We could in theory do more work to spin
5458 * through and stop after dumping one msg aka seeing the start of a
5459 * new msg at the head, and call the delivery function... to see if
5460 * it can be delivered... But for now we just dump everything on the
5461 * reasm queue.
5462 */
5463 strm = &asoc->strmin[stream];
5464 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5465 if (control == NULL) {
5469 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5472 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5473 /* Purge hanging chunks */
5474 if (!asoc->idata_supported && (ordered == 0)) {
5475 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5480 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5481 if (asoc->size_on_reasm_queue >= chk->send_size) {
5482 asoc->size_on_reasm_queue -= chk->send_size;
5485 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5487 asoc->size_on_reasm_queue = 0;
5490 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5492 sctp_m_freem(chk->data);
5495 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5497 if (!TAILQ_EMPTY(&control->reasm)) {
5498 /* This has to be old data, unordered */
5499 if (control->data) {
5500 sctp_m_freem(control->data);
5501 control->data = NULL;
5503 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5504 chk = TAILQ_FIRST(&control->reasm);
5505 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5506 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5507 sctp_add_chk_to_control(control, strm, stcb, asoc,
5508 chk, SCTP_READ_LOCK_HELD);
5510 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5513 if (control->on_strm_q == SCTP_ON_ORDERED) {
5514 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5515 if (asoc->size_on_all_streams >= control->length) {
5516 asoc->size_on_all_streams -= control->length;
5519 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5521 asoc->size_on_all_streams = 0;
5524 sctp_ucount_decr(asoc->cnt_on_all_streams);
5525 control->on_strm_q = 0;
5526 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5527 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5528 control->on_strm_q = 0;
5530 } else if (control->on_strm_q) {
5531 panic("strm: %p ctl: %p unknown %d",
5532 strm, control, control->on_strm_q);
5535 control->on_strm_q = 0;
5536 if (control->on_read_q == 0) {
5537 sctp_free_remote_addr(control->whoFrom);
5538 if (control->data) {
5539 sctp_m_freem(control->data);
5540 control->data = NULL;
5542 sctp_free_a_readq(stcb, control);
5547 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5548 struct sctp_forward_tsn_chunk *fwd,
5549 int *abort_flag, struct mbuf *m, int offset)
5551 /* The pr-sctp fwd tsn */
5552 /*
5553 * here we will perform all the data receiver side steps for
5554 * processing FwdTSN, as required by the pr-sctp draft:
5555 *
5556 * Assume we get FwdTSN(x):
5557 *
5558 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5559 * + others we have 3) examine and update re-ordering queue on
5560 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5561 * report where we are.
5562 */
5563 struct sctp_association *asoc;
5564 uint32_t new_cum_tsn, gap;
5565 unsigned int i, fwd_sz, m_size;
5567 struct sctp_stream_in *strm;
5568 struct sctp_queued_to_read *control, *sv;
5571 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5572 SCTPDBG(SCTP_DEBUG_INDATA1,
5573 "Bad size too small/big fwd-tsn\n");
5576 m_size = (stcb->asoc.mapping_array_size << 3);
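/*
 * Illustrative sketch only: the mapping arrays are bitmaps with one
 * bit per TSN starting at mapping_array_base_tsn, so the number of
 * representable TSNs (m_size) is the byte size times eight. The
 * PRESENT/SET macros are assumed to be plain bit tests and sets:
 */
#if 0
{
uint8_t ex_map[8];
uint32_t ex_gap = 10;

memset(ex_map, 0, sizeof(ex_map));
ex_map[ex_gap >> 3] |= (uint8_t)(1 << (ex_gap & 0x07));	/* set */
if (ex_map[ex_gap >> 3] & (1 << (ex_gap & 0x07))) {
/* present */
}
}
#endif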
5577 /*************************************************************/
5578 /* 1. Here we update local cumTSN and shift the bitmap array */
5579 /*************************************************************/
5580 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5582 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5583 /* Already got there ... */
5586 /*
5587 * now we know the new TSN is more advanced, let's find the actual
5588 * gap.
5589 */
5590 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5591 asoc->cumulative_tsn = new_cum_tsn;
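/*
 * Illustrative sketch only: the gap computed above is the bit offset
 * of new_cum_tsn within the mapping array, i.e. its distance from
 * mapping_array_base_tsn modulo 2^32. Unsigned subtraction gives the
 * same answer across wraparound: base 0xfffffffe and new cum-TSN
 * 0x00000001 yield a gap of 3.
 */
#if 0
{
uint32_t ex_gap;

ex_gap = new_cum_tsn - asoc->mapping_array_base_tsn;
}
#endif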
5592 if (gap >= m_size) {
5593 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5594 struct mbuf *op_err;
5595 char msg[SCTP_DIAG_INFO_LEN];
5597 /*
5598 * out of range (of single byte chunks in the rwnd I
5599 * give out). This must be an attacker.
5600 */
5602 snprintf(msg, sizeof(msg),
5603 "New cum ack %8.8x too high, highest TSN %8.8x",
5604 new_cum_tsn, asoc->highest_tsn_inside_map);
5605 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5606 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5607 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5610 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5612 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5613 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5614 asoc->highest_tsn_inside_map = new_cum_tsn;
5616 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5617 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5619 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5620 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5623 SCTP_TCB_LOCK_ASSERT(stcb);
5624 for (i = 0; i <= gap; i++) {
5625 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5626 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5627 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5628 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5629 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5634 /*************************************************************/
5635 /* 2. Clear up re-assembly queue */
5636 /*************************************************************/
5638 /* This is now done as part of clearing up the stream/seq */
5639 if (asoc->idata_supported == 0) {
5642 /* Flush all the un-ordered data based on cum-tsn */
5643 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5644 for (sid = 0; sid < asoc->streamincnt; sid++) {
5645 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5647 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5649 /*******************************************************/
5650 /* 3. Update the PR-stream re-ordering queues and fix */
5651 /* delivery issues as needed. */
5652 /*******************************************************/
5653 fwd_sz -= sizeof(*fwd);
5656 unsigned int num_str;
5657 uint32_t mid, cur_mid;
5659 uint16_t ordered, flags;
5660 struct sctp_strseq *stseq, strseqbuf;
5661 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5663 offset += sizeof(*fwd);
5665 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5666 if (asoc->idata_supported) {
5667 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5669 num_str = fwd_sz / sizeof(struct sctp_strseq);
5671 for (i = 0; i < num_str; i++) {
5672 if (asoc->idata_supported) {
5673 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5674 sizeof(struct sctp_strseq_mid),
5675 (uint8_t *)&strseqbuf_m);
5676 offset += sizeof(struct sctp_strseq_mid);
5677 if (stseq_m == NULL) {
5680 sid = ntohs(stseq_m->sid);
5681 mid = ntohl(stseq_m->mid);
5682 flags = ntohs(stseq_m->flags);
5683 if (flags & PR_SCTP_UNORDERED_FLAG) {
5689 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5690 sizeof(struct sctp_strseq),
5691 (uint8_t *)&strseqbuf);
5692 offset += sizeof(struct sctp_strseq);
5693 if (stseq == NULL) {
5696 sid = ntohs(stseq->sid);
5697 mid = (uint32_t)ntohs(stseq->ssn);
5704 /*
5705 * Ok we now look for the stream/seq on the read
5706 * queue where it's not all delivered. If we find it
5707 * we transmute the read entry into a PDI_ABORTED.
5708 */
5709 if (sid >= asoc->streamincnt) {
5710 /* screwed up streams, stop! */
5713 if ((asoc->str_of_pdapi == sid) &&
5714 (asoc->ssn_of_pdapi == mid)) {
5715 /*
5716 * If this is the one we were partially
5717 * delivering now then we no longer are.
5718 * Note this will change with the reassembly
5719 * re-write.
5720 */
5721 asoc->fragmented_delivery_inprogress = 0;
5723 strm = &asoc->strmin[sid];
5724 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5725 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5727 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5728 if ((control->sinfo_stream == sid) &&
5729 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5730 str_seq = (sid << 16) | (0x0000ffff & mid);
5731 control->pdapi_aborted = 1;
5732 sv = stcb->asoc.control_pdapi;
5733 control->end_added = 1;
5734 if (control->on_strm_q == SCTP_ON_ORDERED) {
5735 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5736 if (asoc->size_on_all_streams >= control->length) {
5737 asoc->size_on_all_streams -= control->length;
5740 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5742 asoc->size_on_all_streams = 0;
5745 sctp_ucount_decr(asoc->cnt_on_all_streams);
5746 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5747 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5749 } else if (control->on_strm_q) {
5750 panic("strm: %p ctl: %p unknown %d",
5751 strm, control, control->on_strm_q);
5754 control->on_strm_q = 0;
5755 stcb->asoc.control_pdapi = control;
5756 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5758 SCTP_PARTIAL_DELIVERY_ABORTED,
5760 SCTP_SO_NOT_LOCKED);
5761 stcb->asoc.control_pdapi = sv;
5763 } else if ((control->sinfo_stream == sid) &&
5764 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5765 /* We are past our victim SSN */
5769 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5770 /* Update the sequence number */
5771 strm->last_mid_delivered = mid;
5773 /* now kick the stream the new way */
5774 /* sa_ignore NO_NULL_CHK */
5775 sctp_kick_prsctp_reorder_queue(stcb, strm);
5777 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5779 /*
5780 * Now slide things forward.
5781 */
5782 sctp_slide_mapping_arrays(stcb);
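/*
 * Illustrative sketch only (not the real sctp_slide_mapping_arrays()):
 * once the cumulative TSN has advanced, whole leading bytes of a TSN
 * bitmap can be shifted out and the base TSN moved up by the
 * corresponding number of bits, which is the essence of the slide.
 */
#if 0
{
/* shift out 'lead' whole bytes of the map and advance the base TSN */
size_t lead = 1;

memmove(asoc->mapping_array, asoc->mapping_array + lead,
stcb->asoc.mapping_array_size - lead);
memset(asoc->mapping_array + (stcb->asoc.mapping_array_size - lead),
0, lead);
asoc->mapping_array_base_tsn += (uint32_t)(lead << 3);
}
#endif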