2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
54 * NOTES: On the outbound side of things I need to check the sack timer to
55 * see if I should generate a sack into the chunk queue (if I have data to
56 * send that is and will be sending it .. for bundling.
58 * The callback in sctp_usrreq.c will get called when the socket is read from.
59 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: merge a reassembly chunk's data into a read-queue
 * control entry (definition appears later in this file).
 * NOTE(review): the return-type line preceding this prototype is elided in
 * this excerpt -- confirm against the full source.
 */
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64 struct sctp_stream_in *strm,
65 struct sctp_tcb *stcb,
66 struct sctp_association *asoc,
67 struct sctp_tmit_chunk *chk);
/*
 * Recompute the association's advertised receive window and cache it in
 * asoc->my_rwnd.  Thin wrapper around sctp_calc_rwnd().
 */
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 /* Calculate what the rwnd would be */
/*
 * Computes the receive window from the socket receive-buffer space, then
 * subtracts data still held on the reassembly and per-stream queues (plus
 * per-chunk MSIZE overhead) and the rwnd control overhead.
 * NOTE(review): several lines (braces, else arms, the return) are elided in
 * this excerpt; the early-out for a NULL socket and the final SWS clamp to 1
 * are only partially visible.
 */
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
83 * This is really set wrong with respect to a 1-2-m socket. Since
84 * the sb_cc is the count that everyone as put up. When we re-write
85 * sctp_soreceive then we will fix this so that ONLY this
86 * associations data is taken into account.
88 if (stcb->sctp_socket == NULL) {
91 if (stcb->asoc.sb_cc == 0 &&
92 asoc->size_on_reasm_queue == 0 &&
93 asoc->size_on_all_streams == 0) {
94 /* Full rwnd granted */
95 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 /* get actual space */
99 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
101 * take out what has NOT been put on socket queue and we yet hold
104 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 asoc->cnt_on_reasm_queue * MSIZE));
106 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 asoc->cnt_on_all_streams * MSIZE));
112 /* what is the overhead of all these rwnd's */
113 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
115 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 * even it is 0. SWS engaged
118 if (calc < stcb->asoc.my_rwnd_control_len) {
127 * Build out our readq entry based on the incoming packet.
/*
 * Allocates and initializes a sctp_queued_to_read entry from the fields of
 * an incoming DATA chunk (TSN, PPID, context, stream id/sequence, flags).
 * Takes a reference on the source net (whoFrom).  Returns the new entry,
 * or NULL when allocation fails.
 * NOTE(review): the final parameter (`dm`, the data mbuf) and the opening
 * brace are on lines elided from this excerpt.
 */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131 struct sctp_nets *net,
132 uint32_t tsn, uint32_t ppid,
133 uint32_t context, uint16_t stream_no,
134 uint32_t stream_seq, uint8_t flags,
137 struct sctp_queued_to_read *read_queue_e = NULL;
139 sctp_alloc_a_readq(stcb, read_queue_e);
140 if (read_queue_e == NULL) {
143 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 read_queue_e->sinfo_stream = stream_no;
145 read_queue_e->sinfo_ssn = stream_seq;
/* user-visible flags live in the upper byte of sinfo_flags */
146 read_queue_e->sinfo_flags = (flags << 8);
147 read_queue_e->sinfo_ppid = ppid;
148 read_queue_e->sinfo_context = context;
149 read_queue_e->sinfo_tsn = tsn;
150 read_queue_e->sinfo_cumtsn = tsn;
151 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
/* 0xffffffff marks "no fragment seen yet" for the FSN tracking fields */
152 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 TAILQ_INIT(&read_queue_e->reasm);
154 read_queue_e->whoFrom = net;
155 atomic_add_int(&net->ref_count, 1);
156 read_queue_e->data = dm;
157 read_queue_e->stcb = stcb;
158 read_queue_e->port_from = stcb->rport;
160 return (read_queue_e);
/*
 * Build the ancillary-data (cmsg) mbuf for a received message, based on
 * which notification features the endpoint enabled: SCTP_RCVINFO,
 * SCTP_NXTINFO, and the legacy SCTP_SNDRCV/SCTP_EXTRCV.  First sizes the
 * buffer (len), then fills consecutive cmsg headers into one mbuf.
 * NOTE(review): the return-type line, local declarations of `len`/`ret`/`cmh`,
 * NULL-return paths, and several closing braces are elided in this excerpt.
 */
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
166 struct sctp_extrcvinfo *seinfo;
167 struct sctp_sndrcvinfo *outinfo;
168 struct sctp_rcvinfo *rcvinfo;
169 struct sctp_nxtinfo *nxtinfo;
176 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 /* user does not want any ancillary data */
/* Pass 1: compute total cmsg space required */
183 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
186 seinfo = (struct sctp_extrcvinfo *)sinfo;
187 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
190 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
194 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
197 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
206 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 SCTP_BUF_LEN(ret) = 0;
213 /* We need a CMSG header followed by the struct */
214 cmh = mtod(ret, struct cmsghdr *);
216 * Make sure that there is no un-initialized padding between the
217 * cmsg header and cmsg data and after the cmsg data.
/* Pass 2: emit each enabled cmsg, advancing cmh past each CMSG_SPACE */
220 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 cmh->cmsg_level = IPPROTO_SCTP;
222 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 cmh->cmsg_type = SCTP_RCVINFO;
224 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 rcvinfo->rcv_context = sinfo->sinfo_context;
232 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO: describes the *next* message waiting on the socket */
237 cmh->cmsg_level = IPPROTO_SCTP;
238 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 cmh->cmsg_type = SCTP_NXTINFO;
240 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 nxtinfo->nxt_flags = 0;
243 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 nxtinfo->nxt_flags |= SCTP_UNORDERED;
246 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 nxtinfo->nxt_flags |= SCTP_COMPLETE;
252 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
/* extended variant carries the full sctp_extrcvinfo payload */
262 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 cmh->cmsg_type = SCTP_EXTRCV;
264 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 cmh->cmsg_type = SCTP_SNDRCV;
270 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a delivered TSN from the revokable mapping array to the
 * non-revokable (nr) mapping array, and maintain the two highest-TSN
 * watermarks.  No-op unless the sctp_do_drain sysctl is enabled and the
 * TSN is at or beyond the cumulative ack point.
 * NOTE(review): branch bodies, closing braces, and the `break`/`else` lines
 * of the back-scan loop are elided in this excerpt.
 */
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
280 uint32_t gap, i, cumackp1;
282 int in_r = 0, in_nr = 0;
284 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 cumackp1 = asoc->cumulative_tsn + 1;
288 if (SCTP_TSN_GT(cumackp1, tsn)) {
290 * this tsn is behind the cum ack and thus we don't need to
291 * worry about it being moved from one to the other.
295 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* A TSN being marked must already be present in one of the two maps */
298 if ((in_r == 0) && (in_nr == 0)) {
300 panic("Things are really messed up now");
302 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 sctp_print_mapping_array(asoc);
307 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 asoc->highest_tsn_inside_nr_map = tsn;
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
/* No TSN left in the revokable map: reset below the base */
324 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert a read-queue control entry into the proper per-stream queue
 * (unordered vs ordered), keeping the queue sorted by msg_id.  A duplicate
 * msg_id is an error the caller must abort on.
 * NOTE(review): the return-type line, the ordered-queue selection branch,
 * return statements, and several braces are elided in this excerpt --
 * visible code suggests it returns nonzero on duplicate msg_id; confirm
 * against the full source.
 */
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t bits, unordered;
/* chunk flags are kept in the upper byte of sinfo_flags */
338 bits = (control->sinfo_flags >> 8);
339 unordered = bits & SCTP_DATA_UNORDERED;
341 q = &strm->uno_inqueue;
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
345 * Only one stream can be here in old style
350 TAILQ_INSERT_TAIL(q, control, next_instrm);
351 control->on_strm_q = SCTP_ON_UNORDERED;
/* Complete (unfragmented) messages have all fragment flags satisfied */
357 if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
360 if (TAILQ_EMPTY(q)) {
362 TAILQ_INSERT_HEAD(q, control, next_instrm);
364 control->on_strm_q = SCTP_ON_UNORDERED;
366 control->on_strm_q = SCTP_ON_ORDERED;
/* Non-empty queue: walk it to find the sorted insertion point */
370 TAILQ_FOREACH(at, q, next_instrm) {
371 if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
373 * one in queue is bigger than the new one,
374 * insert before this one
376 TAILQ_INSERT_BEFORE(at, control, next_instrm);
378 control->on_strm_q = SCTP_ON_UNORDERED;
380 control->on_strm_q = SCTP_ON_ORDERED;
383 } else if (at->msg_id == control->msg_id) {
385 * Gak, He sent me a duplicate msg id
386 * number?? return -1 to abort.
390 if (TAILQ_NEXT(at, next_instrm) == NULL) {
392 * We are at the end, insert it
395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 sctp_log_strm_del(control, at,
397 SCTP_STR_LOG_FROM_INSERT_TL);
399 TAILQ_INSERT_AFTER(q,
400 at, control, next_instrm);
402 control->on_strm_q = SCTP_ON_UNORDERED;
404 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association because of a reassembly protocol violation.
 * Formats a diagnostic cause string (I-DATA vs legacy DATA format), frees
 * the offending chunk, records the abort location code, and tears down
 * the association.
 * NOTE(review): `oper` declaration, setting of *abort_flag, and some braces
 * are elided in this excerpt.
 */
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416 struct sctp_queued_to_read *control,
417 struct sctp_tmit_chunk *chk,
418 int *abort_flag, int opspot)
420 char msg[SCTP_DIAG_INFO_LEN];
/* I-DATA (idata_supported) uses 32-bit FSN/MID in the diagnostic */
423 if (stcb->asoc.idata_supported) {
424 snprintf(msg, sizeof(msg),
425 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
427 control->fsn_included,
428 chk->rec.data.TSN_seq,
429 chk->rec.data.stream_number,
430 chk->rec.data.fsn_num, chk->rec.data.stream_seq);
432 snprintf(msg, sizeof(msg),
433 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
435 control->fsn_included,
436 chk->rec.data.TSN_seq,
437 chk->rec.data.stream_number,
438 chk->rec.data.fsn_num,
439 (uint16_t) chk->rec.data.stream_seq);
441 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
442 sctp_m_freem(chk->data);
444 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
445 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
446 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Release a control entry that could not be queued: free every chunk still
 * on its reassembly list (data mbufs included), then free the control
 * itself.
 */
451 clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
454 * The control could not be placed and must be cleaned.
456 struct sctp_tmit_chunk *chk, *nchk;
458 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
459 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
461 sctp_m_freem(chk->data);
463 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
465 sctp_free_a_readq(stcb, control);
469 * Queue the chunk either right into the socket buffer if it is the next one
470 * to go OR put it in the correct place in the delivery queue. If we do
471 * append to the so_buf, keep doing so until we are out of order as
472 * long as the control's entered are non-fragmented.
/*
 * Ordered-delivery entry point.  If the control's SSN is the next expected
 * one, deliver it (and any now-in-order successors) straight to the read
 * queue; otherwise insert it sorted via sctp_place_control_in_stream().
 * A behind-the-window SSN or duplicate msg_id aborts the association.
 * NOTE(review): the return type, several else/return paths, and many braces
 * are elided in this excerpt; the Apple/SO_LOCK_TESTING socket-lock dance
 * is also only partially visible.
 */
475 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
476 struct sctp_stream_in *strm,
477 struct sctp_association *asoc,
478 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
481 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
482 * all the data in one stream this could happen quite rapidly. One
483 * could use the TSN to keep track of things, but this scheme breaks
484 * down in the other type of stream usage that could occur. Send a
485 * single msg to stream 0, send 4Billion messages to stream 1, now
486 * send a message to stream 0. You have a situation where the TSN
487 * has wrapped but not in the stream. Is this worth worrying about
488 * or should we just change our queue sort at the bottom to be by
491 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
492 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
493 * assignment this could happen... and I don't see how this would be
494 * a violation. So for now I am undecided an will leave the sort by
495 * SSN alone. Maybe a hybred approach is the answer
498 struct sctp_queued_to_read *at;
502 char msg[SCTP_DIAG_INFO_LEN];
504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
505 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
507 if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
508 /* The incoming sseq is behind where we last delivered? */
509 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
510 control->sinfo_ssn, strm->last_sequence_delivered);
513 * throw it in the stream so it gets cleaned up in
514 * association destruction
516 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
517 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
518 strm->last_sequence_delivered, control->sinfo_tsn,
519 control->sinfo_stream, control->sinfo_ssn);
520 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
521 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
522 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
527 if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
/* Account the data as queued on a stream before checking delivery */
531 asoc->size_on_all_streams += control->length;
532 sctp_ucount_incr(asoc->cnt_on_all_streams);
533 nxt_todel = strm->last_sequence_delivered + 1;
534 if (nxt_todel == control->sinfo_ssn) {
535 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
538 so = SCTP_INP_SO(stcb->sctp_ep);
539 atomic_add_int(&stcb->asoc.refcnt, 1);
540 SCTP_TCB_UNLOCK(stcb);
541 SCTP_SOCKET_LOCK(so, 1);
543 atomic_subtract_int(&stcb->asoc.refcnt, 1);
544 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
545 SCTP_SOCKET_UNLOCK(so, 1);
549 /* can be delivered right away? */
550 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
551 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
553 /* EY it wont be queued if it could be delivered directly */
555 asoc->size_on_all_streams -= control->length;
556 sctp_ucount_decr(asoc->cnt_on_all_streams);
557 strm->last_sequence_delivered++;
558 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
559 sctp_add_to_readq(stcb->sctp_ep, stcb,
561 &stcb->sctp_socket->so_rcv, 1,
562 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain any queued controls that just became in-order and complete */
563 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
565 nxt_todel = strm->last_sequence_delivered + 1;
566 if ((nxt_todel == control->sinfo_ssn) &&
567 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
568 asoc->size_on_all_streams -= control->length;
569 sctp_ucount_decr(asoc->cnt_on_all_streams);
570 if (control->on_strm_q == SCTP_ON_ORDERED) {
571 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
574 panic("Huh control: %p is on_strm_q: %d",
575 control, control->on_strm_q);
578 control->on_strm_q = 0;
579 strm->last_sequence_delivered++;
581 * We ignore the return of deliver_data here
582 * since we always can hold the chunk on the
583 * d-queue. And we have a finite number that
584 * can be delivered from the strq.
586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
587 sctp_log_strm_del(control, NULL,
588 SCTP_STR_LOG_FROM_IMMED_DEL);
590 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
591 sctp_add_to_readq(stcb->sctp_ep, stcb,
593 &stcb->sctp_socket->so_rcv, 1,
594 SCTP_READ_LOCK_NOT_HELD,
597 } else if (nxt_todel == control->sinfo_ssn) {
602 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
603 SCTP_SOCKET_UNLOCK(so, 1);
608 * Ok, we did not deliver this guy, find the correct place
609 * to put it on the queue.
611 if (sctp_place_control_in_stream(strm, asoc, control)) {
612 snprintf(msg, sizeof(msg),
613 "Queue to str msg_id: %u duplicate",
615 clean_up_control(stcb, control);
616 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
617 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
618 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk the control's mbuf chain, dropping zero-length mbufs, recomputing
 * control->length, and setting control->tail_mbuf to the last mbuf.  If the
 * control is already on the read queue, also charge each mbuf to the
 * socket receive buffer (caller holds the needed SB locks).
 * NOTE(review): the loop header (`m = control->data; while (m) ...`) and
 * some braces are elided in this excerpt.
 */
626 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
628 struct mbuf *m, *prev = NULL;
629 struct sctp_tcb *stcb;
631 stcb = control->stcb;
632 control->held_length = 0;
636 if (SCTP_BUF_LEN(m) == 0) {
637 /* Skip mbufs with NO length */
640 control->data = sctp_m_free(m);
643 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
644 m = SCTP_BUF_NEXT(prev);
647 control->tail_mbuf = prev;
652 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
653 if (control->on_read_q) {
655 * On read queue so we must increment the SB stuff,
656 * we assume caller has done any locks of SB.
658 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
660 m = SCTP_BUF_NEXT(m);
663 control->tail_mbuf = prev;
/*
 * Append mbuf chain `m` to the control's data, skipping zero-length mbufs,
 * updating control->length and tail_mbuf, and charging the socket receive
 * buffer when the control is already on the read queue.  Falls back to
 * sctp_setup_tail_pointer() when no tail has been established yet.
 * NOTE(review): the NULL-stcb guard around the panic, the append loop
 * header, and several braces are elided in this excerpt.
 */
668 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
670 struct mbuf *prev = NULL;
671 struct sctp_tcb *stcb;
673 stcb = control->stcb;
676 panic("Control broken");
681 if (control->tail_mbuf == NULL) {
684 sctp_setup_tail_pointer(control);
687 control->tail_mbuf->m_next = m;
689 if (SCTP_BUF_LEN(m) == 0) {
690 /* Skip mbufs with NO length */
693 control->tail_mbuf->m_next = sctp_m_free(m);
694 m = control->tail_mbuf->m_next;
696 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
697 m = SCTP_BUF_NEXT(prev);
700 control->tail_mbuf = prev;
705 if (control->on_read_q) {
707 * On read queue so we must increment the SB stuff,
708 * we assume caller has done any locks of SB.
710 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
712 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
713 m = SCTP_BUF_NEXT(m);
716 control->tail_mbuf = prev;
/*
 * Clone the metadata of `control` into the freshly-allocated entry `nc`
 * (used when splitting leftover reassembly fragments onto a new control).
 * Data pointers are NOT copied; fsn_included is reset to the "none seen"
 * sentinel and a reference is taken on the shared whoFrom net.
 */
721 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
723 memset(nc, 0, sizeof(struct sctp_queued_to_read));
724 nc->sinfo_stream = control->sinfo_stream;
725 nc->sinfo_ssn = control->sinfo_ssn;
726 TAILQ_INIT(&nc->reasm);
727 nc->top_fsn = control->top_fsn;
728 nc->msg_id = control->msg_id;
729 nc->sinfo_flags = control->sinfo_flags;
730 nc->sinfo_ppid = control->sinfo_ppid;
731 nc->sinfo_context = control->sinfo_context;
/* 0xffffffff == no fragment included yet on the new control */
732 nc->fsn_included = 0xffffffff;
733 nc->sinfo_tsn = control->sinfo_tsn;
734 nc->sinfo_cumtsn = control->sinfo_cumtsn;
735 nc->sinfo_assoc_id = control->sinfo_assoc_id;
736 nc->whoFrom = control->whoFrom;
737 atomic_add_int(&nc->whoFrom->ref_count, 1);
738 nc->stcb = control->stcb;
739 nc->port_from = control->port_from;
/*
 * Legacy (non-I-DATA) unordered reassembly: all fragments share msg_id 0,
 * so collapse consecutive FSNs onto the control, and when the message
 * completes, spin any leftover fragments onto a new control entry (nc)
 * before handing the finished one to the read queue.  May also start a
 * partial-delivery (PD-API) session when the accumulated data exceeds
 * pd_point.
 * NOTE(review): the return type/values, `fsn` declaration, inner loop
 * structure, and numerous braces are elided in this excerpt.
 */
743 sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
744 struct sctp_queued_to_read *control, uint32_t pd_point)
747 * Special handling for the old un-ordered data chunk. All the
748 * chunks/TSN's go to msg_id 0. So we have to do the old style
749 * watching to see if we have it all. If you return one, no other
750 * control entries on the un-ordered queue will be looked at. In
751 * theory there should be no others entries in reality, unless the
752 * guy is sending both unordered NDATA and unordered DATA...
754 struct sctp_tmit_chunk *chk, *lchk, *tchk;
756 struct sctp_queued_to_read *nc;
759 if (control->first_frag_seen == 0) {
760 /* Nothing we can do, we have not seen the first piece yet */
763 /* Collapse any we can */
766 fsn = control->fsn_included + 1;
767 /* Now what can we add? */
768 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
769 if (chk->rec.data.fsn_num == fsn) {
771 sctp_alloc_a_readq(stcb, nc);
775 memset(nc, 0, sizeof(struct sctp_queued_to_read));
776 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
777 sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
781 if (control->end_added) {
783 if (!TAILQ_EMPTY(&control->reasm)) {
785 * Ok we have to move anything left
786 * on the control queue to a new
789 sctp_build_readq_entry_from_ctl(nc, control);
790 tchk = TAILQ_FIRST(&control->reasm);
791 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
792 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
793 nc->first_frag_seen = 1;
794 nc->fsn_included = tchk->rec.data.fsn_num;
795 nc->data = tchk->data;
796 sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
798 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
799 sctp_setup_tail_pointer(nc);
800 tchk = TAILQ_FIRST(&control->reasm);
802 /* Spin the rest onto the queue */
804 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
805 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
806 tchk = TAILQ_FIRST(&control->reasm);
809 * Now lets add it to the queue
810 * after removing control
812 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
813 nc->on_strm_q = SCTP_ON_UNORDERED;
814 if (control->on_strm_q) {
815 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
816 control->on_strm_q = 0;
/* finishing the message ends any PD-API session on it */
819 if (control->pdapi_started) {
820 strm->pd_api_started = 0;
821 control->pdapi_started = 0;
823 if (control->on_strm_q) {
824 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
825 control->on_strm_q = 0;
826 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
828 if (control->on_read_q == 0) {
829 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
830 &stcb->sctp_socket->so_rcv, control->end_added,
831 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
833 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
834 if (!TAILQ_EMPTY(&nc->reasm) && (nc->first_frag_seen)) {
836 * Switch to the new guy and
842 sctp_free_a_readq(stcb, nc);
846 sctp_free_a_readq(stcb, nc);
/* Message not complete: consider starting partial delivery */
853 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
854 strm->pd_api_started = 1;
855 control->pdapi_started = 1;
856 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
857 &stcb->sctp_socket->so_rcv, control->end_added,
858 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
859 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Insert a legacy unordered fragment (chk) into the control's reassembly
 * list, kept sorted by FSN.  A FIRST_FRAG that precedes an already-seen
 * first forces a data/length/FSN swap with the control; a duplicate FSN or
 * an FSN colliding with a started PD-API aborts the association.
 * NOTE(review): the trailing parameter list (abort flag), `tdata`/`tmp`
 * locals, return paths, and several braces/else arms are elided in this
 * excerpt.
 */
867 sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
868 struct sctp_queued_to_read *control,
869 struct sctp_tmit_chunk *chk,
872 struct sctp_tmit_chunk *at;
876 * Here we need to place the chunk into the control structure sorted
877 * in the correct order.
879 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
880 /* Its the very first one. */
881 SCTPDBG(SCTP_DEBUG_XXX,
882 "chunk is a first fsn: %u becomes fsn_included\n",
883 chk->rec.data.fsn_num);
884 if (control->first_frag_seen) {
886 * In old un-ordered we can reassembly on one
887 * control multiple messages. As long as the next
888 * FIRST is greater then the old first (TSN i.e. FSN
894 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
896 * Easy way the start of a new guy beyond
901 if ((chk->rec.data.fsn_num == control->fsn_included) ||
902 (control->pdapi_started)) {
904 * Ok this should not happen, if it does we
905 * started the pd-api on the higher TSN
906 * (since the equals part is a TSN failure
909 * We are completly hosed in that case since I
910 * have no way to recover. This really will
911 * only happen if we can get more TSN's
912 * higher before the pd-api-point.
914 sctp_abort_in_reasm(stcb, control, chk,
916 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
921 * Ok we have two firsts and the one we just got is
922 * smaller than the one we previously placed.. yuck!
923 * We must swap them out.
/* Swap data mbuf chains between chk and the control */
926 tdata = control->data;
927 control->data = chk->data;
929 /* Swap the lengths */
930 tmp = control->length;
931 control->length = chk->send_size;
932 chk->send_size = tmp;
933 /* Fix the FSN included */
934 tmp = control->fsn_included;
935 control->fsn_included = chk->rec.data.fsn_num;
936 chk->rec.data.fsn_num = tmp;
/* First fragment ever seen: the control absorbs the chunk's data */
939 control->first_frag_seen = 1;
940 control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
941 control->data = chk->data;
942 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
944 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
945 sctp_setup_tail_pointer(control);
/* Not a first fragment: sorted insertion into the reasm list */
949 if (TAILQ_EMPTY(&control->reasm)) {
950 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
951 asoc->size_on_reasm_queue += chk->send_size;
952 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
955 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
956 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
958 * This one in queue is bigger than the new one,
959 * insert the new one before at.
961 asoc->size_on_reasm_queue += chk->send_size;
962 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
964 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
966 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
968 * They sent a duplicate fsn number. This really
969 * should not happen since the FSN is a TSN and it
970 * should have been dropped earlier.
972 sctp_abort_in_reasm(stcb, control, chk,
974 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
/* Largest FSN so far: append and raise the top watermark */
980 asoc->size_on_reasm_queue += chk->send_size;
981 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
982 control->top_fsn = chk->rec.data.fsn_num;
983 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * Scan a stream's unordered and ordered queues for reassembled messages
 * that are ready to deliver, moving them to the socket read queue and
 * starting partial delivery (PD-API) when a message exceeds pd_point.
 * pd_point is the smaller of a fraction of the receive buffer and the
 * endpoint's configured partial_delivery_point.
 * NOTE(review): the return type, loop headers (the unordered/ordered walk
 * constructs), `pd_point`/`done` declarations, and many braces/returns are
 * elided in this excerpt.
 */
988 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
991 * Given a stream, strm, see if any of the SSN's on it that are
992 * fragmented are ready to deliver. If so go ahead and place them on
993 * the read queue. In so placing if we have hit the end, then we
994 * need to remove them from the stream's queue.
996 struct sctp_queued_to_read *control, *nctl = NULL;
997 uint32_t next_to_del;
1001 if (stcb->sctp_socket) {
1002 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1003 stcb->sctp_ep->partial_delivery_point);
1005 pd_point = stcb->sctp_ep->partial_delivery_point;
/* --- Unordered queue --- */
1007 control = TAILQ_FIRST(&strm->uno_inqueue);
1009 (asoc->idata_supported == 0)) {
1010 /* Special handling needed for "old" data format */
1011 if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
1015 if (strm->pd_api_started) {
1016 /* Can't add more */
1020 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1021 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1022 nctl = TAILQ_NEXT(control, next_instrm);
1023 if (control->end_added) {
1024 /* We just put the last bit on */
1025 if (control->on_strm_q) {
1027 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1028 panic("Huh control: %p on_q: %d -- not unordered?",
1029 control, control->on_strm_q);
1032 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1033 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1034 control->on_strm_q = 0;
1036 if (control->on_read_q == 0) {
1037 sctp_add_to_readq(stcb->sctp_ep, stcb,
1039 &stcb->sctp_socket->so_rcv, control->end_added,
1040 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1043 /* Can we do a PD-API for this un-ordered guy? */
1044 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1045 strm->pd_api_started = 1;
1046 control->pdapi_started = 1;
1047 sctp_add_to_readq(stcb->sctp_ep, stcb,
1049 &stcb->sctp_socket->so_rcv, control->end_added,
1050 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* --- Ordered queue --- */
1058 control = TAILQ_FIRST(&strm->inqueue);
1059 if (strm->pd_api_started) {
1060 /* Can't add more */
1063 if (control == NULL) {
1066 if (strm->last_sequence_delivered == control->sinfo_ssn) {
1068 * Ok the guy at the top was being partially delivered
1069 * completed, so we remove it. Note the pd_api flag was
1070 * taken off when the chunk was merged on in
1071 * sctp_queue_data_for_reasm below.
1073 nctl = TAILQ_NEXT(control, next_instrm);
1074 SCTPDBG(SCTP_DEBUG_XXX,
1075 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1076 control, control->end_added, control->sinfo_ssn,
1077 control->top_fsn, control->fsn_included,
1078 strm->last_sequence_delivered);
1079 if (control->end_added) {
1080 if (control->on_strm_q) {
1082 if (control->on_strm_q != SCTP_ON_ORDERED) {
1083 panic("Huh control: %p on_q: %d -- not ordered?",
1084 control, control->on_strm_q);
1087 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1088 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1089 control->on_strm_q = 0;
1091 if (strm->pd_api_started && control->pdapi_started) {
1092 control->pdapi_started = 0;
1093 strm->pd_api_started = 0;
1095 if (control->on_read_q == 0) {
1096 sctp_add_to_readq(stcb->sctp_ep, stcb,
1098 &stcb->sctp_socket->so_rcv, control->end_added,
1099 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1104 if (strm->pd_api_started) {
1106 * Can't add more must have gotten an un-ordered above being
1107 * partially delivered.
/* Deliver the next expected SSN(s), if their first fragment arrived */
1112 next_to_del = strm->last_sequence_delivered + 1;
1114 SCTPDBG(SCTP_DEBUG_XXX,
1115 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1116 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1118 nctl = TAILQ_NEXT(control, next_instrm);
1119 if ((control->sinfo_ssn == next_to_del) &&
1120 (control->first_frag_seen)) {
1123 /* Ok we can deliver it onto the stream. */
1124 if (control->end_added) {
1125 /* We are done with it afterwards */
1126 if (control->on_strm_q) {
1128 if (control->on_strm_q != SCTP_ON_ORDERED) {
1129 panic("Huh control: %p on_q: %d -- not ordered?",
1130 control, control->on_strm_q);
1133 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1134 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1135 control->on_strm_q = 0;
1139 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1141 * A singleton now slipping through - mark
1142 * it non-revokable too
1144 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1145 } else if (control->end_added == 0) {
1147 * Check if we can defer adding until its
1150 if ((control->length < pd_point) || (strm->pd_api_started)) {
1152 * Don't need it or cannot add more
1153 * (one being delivered that way)
1158 done = (control->end_added) && (control->last_frag_seen);
1159 if (control->on_read_q == 0) {
1160 sctp_add_to_readq(stcb->sctp_ep, stcb,
1162 &stcb->sctp_socket->so_rcv, control->end_added,
1163 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1165 strm->last_sequence_delivered = next_to_del;
1170 /* We are now doing PD API */
1171 strm->pd_api_started = 1;
1172 control->pdapi_started = 1;
/*
 * Merge the payload of chunk 'chk' onto the partially reassembled
 * message 'control' and release the chunk descriptor.  If the control
 * is already on the socket read queue (PD-API delivery in progress),
 * the inp read lock is held around the merge.
 * NOTE(review): this excerpt is elided in places (missing braces and
 * lines); the comments below describe only the visible statements.
 */
1181 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1182 struct sctp_stream_in *strm,
1183 struct sctp_tcb *stcb, struct sctp_association *asoc,
1184 struct sctp_tmit_chunk *chk)
1187 * Given a control and a chunk, merge the data from the chk onto the
1188 * control and free up the chunk resources.
/* A reader may be consuming this message concurrently: lock first. */
1192 if (control->on_read_q) {
1194 * Its being pd-api'd so we must do some locks.
1196 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First data for this control, otherwise append at the mbuf tail. */
1199 if (control->data == NULL) {
1200 control->data = chk->data;
1201 sctp_setup_tail_pointer(control);
1203 sctp_add_to_tail_pointer(control, chk->data);
/*
 * Accounting: the chunk's bytes leave the reassembly queue and its
 * TSN can no longer be revoked by the peer.
 */
1205 control->fsn_included = chk->rec.data.fsn_num;
1206 asoc->size_on_reasm_queue -= chk->send_size;
1207 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1208 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1210 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1211 control->first_frag_seen = 1;
/*
 * Last fragment: end any PD-API in progress and unlink the control
 * from whichever stream queue (unordered/ordered) it is on.
 */
1213 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1215 if ((control->on_strm_q) && (control->on_read_q)) {
1216 if (control->pdapi_started) {
1217 control->pdapi_started = 0;
1218 strm->pd_api_started = 0;
1220 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1222 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1223 control->on_strm_q = 0;
1224 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1226 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1227 control->on_strm_q = 0;
1229 } else if (control->on_strm_q) {
1230 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1231 control->on_strm_q);
/* The message is now complete in the control. */
1235 control->end_added = 1;
1236 control->last_frag_seen = 1;
1239 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* The mbuf chain was taken over above; free only the descriptor. */
1241 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1245 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1246 * queue, see if anthing can be delivered. If so pull it off (or as much as
1247 * we can. If we run out of space then we must dump what we can and set the
1248 * appropriate flag to say we queued what we could.
/*
 * Parameters (visible usage only):
 *   control         - reassembly entry for this message (may have just
 *                     been created; see created_control)
 *   chk             - the newly arrived DATA/I-DATA fragment
 *   created_control - nonzero when 'control' was allocated for this
 *                     chunk and still needs stream-queue placement
 *   abort_flag      - set by the abort helpers when the association is
 *                     torn down; callers must check it afterwards
 *   tsn             - TSN of the arriving chunk, used for validation
 * NOTE(review): this excerpt is elided in places; comments describe
 * only the statements that are visible here.
 */
1251 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1252 struct sctp_stream_in *strm,
1253 struct sctp_queued_to_read *control,
1254 struct sctp_tmit_chunk *chk,
1255 int created_control,
1256 int *abort_flag, uint32_t tsn)
1259 struct sctp_tmit_chunk *at, *nat;
1260 int do_wakeup, unordered;
1263 * For old un-ordered data chunks.
1265 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
/* A freshly created control must first be placed on the stream. */
1270 /* Must be added to the stream-in queue */
1271 if (created_control) {
1272 if (sctp_place_control_in_stream(strm, asoc, control)) {
1273 /* Duplicate SSN? */
1274 clean_up_control(stcb, control);
1275 sctp_abort_in_reasm(stcb, control, chk,
1277 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
/* Old DATA adjoining the cum-ack must carry the B (first) bit. */
1280 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1282 * Ok we created this control and now lets validate
1283 * that its legal i.e. there is a B bit set, if not
1284 * and we have up to the cum-ack then its invalid.
1286 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1287 sctp_abort_in_reasm(stcb, control, chk,
1289 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old-style unordered DATA takes a separate injection path. */
1294 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1295 sctp_inject_old_data_unordered(stcb, asoc, control, chk, abort_flag);
1299 * Ok we must queue the chunk into the reasembly portion: o if its
1300 * the first it goes to the control mbuf. o if its not first but the
1301 * next in sequence it goes to the control, and each succeeding one
1302 * in order also goes. o if its not in order we place it on the list
1305 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1306 /* Its the very first one. */
1307 SCTPDBG(SCTP_DEBUG_XXX,
1308 "chunk is a first fsn: %u becomes fsn_included\n",
1309 chk->rec.data.fsn_num);
1310 if (control->first_frag_seen) {
1312 * Error on senders part, they either sent us two
1313 * data chunks with FIRST, or they sent two
1314 * un-ordered chunks that were fragmented at the
1315 * same time in the same stream.
1317 sctp_abort_in_reasm(stcb, control, chk,
1319 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* First fragment: its data becomes the head of the control. */
1322 control->first_frag_seen = 1;
1323 control->fsn_included = chk->rec.data.fsn_num;
1324 control->data = chk->data;
1325 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1327 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1328 sctp_setup_tail_pointer(control);
1330 /* Place the chunk in our list */
/*
 * Non-first fragment: validate the FSN against what has already
 * been delivered/seen before it may be queued.
 */
1333 if (control->last_frag_seen == 0) {
1334 /* Still willing to raise highest FSN seen */
1335 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1336 SCTPDBG(SCTP_DEBUG_XXX,
1337 "We have a new top_fsn: %u\n",
1338 chk->rec.data.fsn_num);
1339 control->top_fsn = chk->rec.data.fsn_num;
1341 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1342 SCTPDBG(SCTP_DEBUG_XXX,
1343 "The last fsn is now in place fsn: %u\n",
1344 chk->rec.data.fsn_num);
1345 control->last_frag_seen = 1;
1347 if (asoc->idata_supported || control->first_frag_seen) {
1349 * For IDATA we always check since we know
1350 * that the first fragment is 0. For old
1351 * DATA we have to receive the first before
1352 * we know the first FSN (which is the TSN).
1354 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1356 * We have already delivered up to
1359 sctp_abort_in_reasm(stcb, control, chk,
1361 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/* Last fragment already seen: a second LAST is a protocol error. */
1366 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1367 /* Second last? huh? */
1368 SCTPDBG(SCTP_DEBUG_XXX,
1369 "Duplicate last fsn: %u (top: %u) -- abort\n",
1370 chk->rec.data.fsn_num, control->top_fsn);
1371 sctp_abort_in_reasm(stcb, control,
1373 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1376 if (asoc->idata_supported || control->first_frag_seen) {
1378 * For IDATA we always check since we know
1379 * that the first fragment is 0. For old
1380 * DATA we have to receive the first before
1381 * we know the first FSN (which is the TSN).
1384 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1386 * We have already delivered up to
1389 SCTPDBG(SCTP_DEBUG_XXX,
1390 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1391 chk->rec.data.fsn_num, control->fsn_included);
1392 sctp_abort_in_reasm(stcb, control, chk,
1394 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1399 * validate not beyond top FSN if we have seen last
1402 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1403 SCTPDBG(SCTP_DEBUG_XXX,
1404 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1405 chk->rec.data.fsn_num,
1407 sctp_abort_in_reasm(stcb, control, chk,
1409 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1414 * If we reach here, we need to place the new chunk in the
1415 * reassembly for this control.
1417 SCTPDBG(SCTP_DEBUG_XXX,
1418 "chunk is a not first fsn: %u needs to be inserted\n",
1419 chk->rec.data.fsn_num);
/* Insert into control->reasm keeping it sorted by FSN. */
1420 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1421 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1423 * This one in queue is bigger than the new
1424 * one, insert the new one before at.
1426 SCTPDBG(SCTP_DEBUG_XXX,
1427 "Insert it before fsn: %u\n",
1428 at->rec.data.fsn_num);
1429 asoc->size_on_reasm_queue += chk->send_size;
1430 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1431 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1434 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1436 * Gak, He sent me a duplicate str seq
1440 * foo bar, I guess I will just free this
1441 * new guy, should we abort too? FIX ME
1442 * MAYBE? Or it COULD be that the SSN's have
1443 * wrapped. Maybe I should compare to TSN
1444 * somehow... sigh for now just blow away
1447 SCTPDBG(SCTP_DEBUG_XXX,
1448 "Duplicate to fsn: %u -- abort\n",
1449 at->rec.data.fsn_num);
1450 sctp_abort_in_reasm(stcb, control,
1452 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1456 if (inserted == 0) {
1457 /* Goes on the end */
1458 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1459 chk->rec.data.fsn_num);
1460 asoc->size_on_reasm_queue += chk->send_size;
1461 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1462 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1466 * Ok lets see if we can suck any up into the control structure that
1467 * are in seq if it makes sense.
1471 * If the first fragment has not been seen there is no sense in
1474 if (control->first_frag_seen) {
/* Merge every queued fragment that is now next-in-sequence. */
1475 next_fsn = control->fsn_included + 1;
1476 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1477 if (at->rec.data.fsn_num == next_fsn) {
1478 /* We can add this one now to the control */
1479 SCTPDBG(SCTP_DEBUG_XXX,
1480 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1482 at->rec.data.fsn_num,
1483 next_fsn, control->fsn_included);
1484 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1485 sctp_add_chk_to_control(control, strm, stcb, asoc, at);
1486 if (control->on_read_q) {
1490 if (control->end_added && control->pdapi_started) {
1491 if (strm->pd_api_started) {
1492 strm->pd_api_started = 0;
1493 control->pdapi_started = 0;
1495 if (control->on_read_q == 0) {
1496 sctp_add_to_readq(stcb->sctp_ep, stcb,
1498 &stcb->sctp_socket->so_rcv, control->end_added,
1499 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1510 /* Need to wakeup the reader */
1511 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Look up the reassembly entry for 'msg_id' on the given stream:
 * the ordered inqueue is scanned first, then the unordered queue.
 * NOTE(review): the 'ordered' and 'old' parameters presumably select
 * which queue(s) are searched / how the unordered queue is walked
 * (old-style DATA vs I-DATA) -- the branches using them are elided
 * here, so confirm against the full source.  Returns the matching
 * control, or (presumably) NULL when none is found.
 */
1515 static struct sctp_queued_to_read *
1516 find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1518 struct sctp_queued_to_read *control;
1521 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1522 if (control->msg_id == msg_id) {
1528 control = TAILQ_FIRST(&strm->uno_inqueue);
1531 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1532 if (control->msg_id == msg_id) {
/*
 * Process one incoming DATA or I-DATA chunk (chtype selects which):
 * parse the header, reject duplicates/empty chunks/invalid streams,
 * record the TSN in the mapping arrays, and either express-deliver
 * the message to the socket, queue it for ordered delivery, or hand
 * it to the reassembly machinery.  On protocol violations the
 * association is aborted and *abort_flag is set.
 * NOTE(review): this excerpt is elided in places (missing braces,
 * else-branches and statements); the comments below annotate only the
 * visible code.
 */
1541 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1542 struct mbuf **m, int offset, int chk_length,
1543 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1544 int *break_flag, int last_chunk, uint8_t chtype)
1546 /* Process a data chunk */
1547 /* struct sctp_tmit_chunk *chk; */
1548 struct sctp_data_chunk *ch;
1549 struct sctp_idata_chunk *nch, chunk_buf;
1550 struct sctp_tmit_chunk *chk;
1551 uint32_t tsn, fsn, gap, msg_id;
1554 int need_reasm_check = 0;
1556 struct mbuf *op_err;
1557 char msg[SCTP_DIAG_INFO_LEN];
1558 struct sctp_queued_to_read *control = NULL;
1559 uint32_t protocol_id;
1560 uint8_t chunk_flags;
1561 struct sctp_stream_reset_list *liste;
1562 struct sctp_stream_in *strm;
1565 int created_control = 0;
/*
 * Parse the chunk header.  I-DATA carries a 32-bit msg_id and an
 * explicit FSN; old DATA uses the 16-bit stream sequence number.
 */
1569 if (chtype == SCTP_IDATA) {
1570 nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1571 sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1572 ch = (struct sctp_data_chunk *)nch;
1573 clen = sizeof(struct sctp_idata_chunk);
1574 tsn = ntohl(ch->dp.tsn);
1575 msg_id = ntohl(nch->dp.msg_id);
1576 if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1579 fsn = ntohl(nch->dp.ppid_fsn.fsn);
1582 ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1583 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1584 tsn = ntohl(ch->dp.tsn);
1585 clen = sizeof(struct sctp_data_chunk);
1587 msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1591 chunk_flags = ch->ch.chunk_flags;
/* A chunk with no user data is a protocol violation: abort. */
1592 if ((size_t)chk_length == clen) {
1594 * Need to send an abort since we had a empty data chunk.
1596 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1597 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1598 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1602 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1603 asoc->send_sack = 1;
1605 protocol_id = ch->dp.protocol_id;
1606 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1608 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1613 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* TSN at or below the cum-ack: duplicate, record it for the SACK. */
1614 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1615 /* It is a duplicate */
1616 SCTP_STAT_INCR(sctps_recvdupdata);
1617 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1618 /* Record a dup for the next outbound sack */
1619 asoc->dup_tsns[asoc->numduptsns] = tsn;
1622 asoc->send_sack = 1;
1625 /* Calculate the number of TSN's between the base and this TSN */
1626 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1627 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1628 /* Can't hold the bit in the mapping at max array, toss it */
1631 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1632 SCTP_TCB_LOCK_ASSERT(stcb);
1633 if (sctp_expand_mapping_array(asoc, gap)) {
1634 /* Can't expand, drop it */
1638 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1641 /* See if we have received this one already */
1642 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1643 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1644 SCTP_STAT_INCR(sctps_recvdupdata);
1645 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1646 /* Record a dup for the next outbound sack */
1647 asoc->dup_tsns[asoc->numduptsns] = tsn;
1650 asoc->send_sack = 1;
1654 * Check to see about the GONE flag, duplicates would cause a sack
1655 * to be sent up above
1657 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1658 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1659 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1661 * wait a minute, this guy is gone, there is no longer a
1662 * receiver. Send peer an ABORT!
1664 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1665 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1670 * Now before going further we see if there is room. If NOT then we
1671 * MAY let one through only IF this TSN is the one we are waiting
1672 * for on a partial delivery API.
1675 /* Is the stream valid? */
1676 strmno = ntohs(ch->dp.stream_id);
/*
 * Invalid stream id: queue an INVALID_STREAM error to the peer but
 * still record the TSN as received (it will be SACKed).
 */
1678 if (strmno >= asoc->streamincnt) {
1679 struct sctp_error_invalid_stream *cause;
1681 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1682 0, M_NOWAIT, 1, MT_DATA);
1683 if (op_err != NULL) {
1684 /* add some space up front so prepend will work well */
1685 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1686 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1688 * Error causes are just param's and this one has
1689 * two back to back phdr, one with the error type
1690 * and size, the other with the streamid and a rsvd
1692 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1693 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1694 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1695 cause->stream_id = ch->dp.stream_id;
1696 cause->reserved = htons(0);
1697 sctp_queue_op_err(stcb, op_err);
1699 SCTP_STAT_INCR(sctps_badsid);
1700 SCTP_TCB_LOCK_ASSERT(stcb);
1701 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1702 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1703 asoc->highest_tsn_inside_nr_map = tsn;
1705 if (tsn == (asoc->cumulative_tsn + 1)) {
1706 /* Update cum-ack */
1707 asoc->cumulative_tsn = tsn;
1711 strm = &asoc->strmin[strmno];
1713 * If its a fragmented message, lets see if we can find the control
1714 * on the reassembly queues.
1716 if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1718 * The first *must* be fsn 0, and other (middle/end) pieces
1719 * can *not* be fsn 0.
/*
 * Fragmented chunk: find any existing reassembly entry and verify
 * the ordered/unordered attribute has not flipped mid-message.
 */
1723 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1724 /* See if we can find the re-assembly entity */
1725 control = find_reasm_entry(strm, msg_id, ordered, old_data);
1726 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1727 chunk_flags, control);
1729 /* We found something, does it belong? */
1730 if (ordered && (msg_id != control->sinfo_ssn)) {
1732 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1733 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1734 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1738 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1740 * We can't have a switched order with an
1745 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1747 * We can't have a switched unordered with a
1755 * Its a complete segment. Lets validate we don't have a
1756 * re-assembly going on with the same Stream/Seq (for
1757 * ordered) or in the same Stream for unordered.
1759 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1761 if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
1762 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
/* Resource limits: queue counts and receive window. */
1769 /* now do the tests */
1770 if (((asoc->cnt_on_all_streams +
1771 asoc->cnt_on_reasm_queue +
1772 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1773 (((int)asoc->my_rwnd) <= 0)) {
1775 * When we have NO room in the rwnd we check to make sure
1776 * the reader is doing its job...
1778 if (stcb->sctp_socket->so_rcv.sb_cc) {
1779 /* some to read, wake-up */
1780 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock order: drop tcb lock, take socket lock, re-take. */
1783 so = SCTP_INP_SO(stcb->sctp_ep);
1784 atomic_add_int(&stcb->asoc.refcnt, 1);
1785 SCTP_TCB_UNLOCK(stcb);
1786 SCTP_SOCKET_LOCK(so, 1);
1787 SCTP_TCB_LOCK(stcb);
1788 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1789 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1790 /* assoc was freed while we were unlocked */
1791 SCTP_SOCKET_UNLOCK(so, 1);
1795 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1796 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1797 SCTP_SOCKET_UNLOCK(so, 1);
1800 /* now is it in the mapping array of what we have accepted? */
1802 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1803 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1804 /* Nope not in the valid range dump it */
1806 sctp_set_rwnd(stcb, asoc);
1807 if ((asoc->cnt_on_all_streams +
1808 asoc->cnt_on_reasm_queue +
1809 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1810 SCTP_STAT_INCR(sctps_datadropchklmt);
1812 SCTP_STAT_INCR(sctps_datadroprwnd);
1818 if (control == NULL) {
1821 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
/* Optional per-association TSN arrival log (debug build). */
1826 #ifdef SCTP_ASOCLOG_OF_TSNS
1827 SCTP_TCB_LOCK_ASSERT(stcb);
1828 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1829 asoc->tsn_in_at = 0;
1830 asoc->tsn_in_wrapped = 1;
1832 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1833 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1834 asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1835 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1836 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1837 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1838 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1839 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1843 * Before we continue lets validate that we are not being fooled by
1844 * an evil attacker. We can only have Nk chunks based on our TSN
1845 * spread allowed by the mapping array N * 8 bits, so there is no
1846 * way our stream sequence numbers could have wrapped. We of course
1847 * only validate the FIRST fragment so the bit must be set.
1849 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1850 (TAILQ_EMPTY(&asoc->resetHead)) &&
1851 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1852 SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1853 /* The incoming sseq is behind where we last delivered? */
1854 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1855 msg_id, asoc->strmin[strmno].last_sequence_delivered);
1857 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1858 asoc->strmin[strmno].last_sequence_delivered,
1859 tsn, strmno, msg_id);
1860 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1861 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1862 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1866 /************************************
1867 * From here down we may find ch-> invalid
1868 * so its a good idea NOT to use it.
1869 *************************************/
/* Extract the payload: copy it, or steal/trim the last chunk. */
1871 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1873 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1875 if (last_chunk == 0) {
1877 dmbuf = SCTP_M_COPYM(*m,
1878 (offset + sizeof(struct sctp_idata_chunk)),
1881 dmbuf = SCTP_M_COPYM(*m,
1882 (offset + sizeof(struct sctp_data_chunk)),
1885 #ifdef SCTP_MBUF_LOGGING
1886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1887 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1891 /* We can steal the last chunk */
1895 /* lop off the top part */
1897 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1899 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1901 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1902 l_len = SCTP_BUF_LEN(dmbuf);
1905 * need to count up the size hopefully does not hit
1911 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1912 l_len += SCTP_BUF_LEN(lat);
1915 if (l_len > the_len) {
1916 /* Trim the end round bytes off too */
1917 m_adj(dmbuf, -(l_len - the_len));
1920 if (dmbuf == NULL) {
1921 SCTP_STAT_INCR(sctps_nomem);
1925 * Now no matter what we need a control, get one if we don't have
1926 * one (we may have gotten it above when we found the message was
1929 if (control == NULL) {
1930 sctp_alloc_a_readq(stcb, control);
1931 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1936 if (control == NULL) {
1937 SCTP_STAT_INCR(sctps_nomem);
/* Unfragmented message: the control is complete immediately. */
1940 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1941 control->data = dmbuf;
1942 control->tail_mbuf = NULL;
1943 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1944 control->top_fsn = control->fsn_included = fsn;
1946 created_control = 1;
1948 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1949 chunk_flags, ordered, msg_id, control);
1950 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1951 TAILQ_EMPTY(&asoc->resetHead) &&
1953 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1954 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1955 /* Candidate for express delivery */
1957 * Its not fragmented, No PD-API is up, Nothing in the
1958 * delivery queue, Its un-ordered OR ordered and the next to
1959 * deliver AND nothing else is stuck on the stream queue,
1960 * And there is room for it in the socket buffer. Lets just
1961 * stuff it up the buffer....
1963 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1964 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1965 asoc->highest_tsn_inside_nr_map = tsn;
1967 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
1970 sctp_add_to_readq(stcb->sctp_ep, stcb,
1971 control, &stcb->sctp_socket->so_rcv,
1972 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1974 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1975 /* for ordered, bump what we delivered */
1976 strm->last_sequence_delivered++;
1978 SCTP_STAT_INCR(sctps_recvexpress);
1979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1980 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
1981 SCTP_STR_LOG_FROM_EXPRS_DEL);
1984 goto finish_express_del;
/* Fragmented: allocate a chunk descriptor for the reasm queue. */
1986 /* Now will we need a chunk too? */
1987 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1988 sctp_alloc_a_chunk(stcb, chk);
1990 /* No memory so we drop the chunk */
1991 SCTP_STAT_INCR(sctps_nomem);
1992 if (last_chunk == 0) {
1993 /* we copied it, free the copy */
1994 sctp_m_freem(dmbuf);
1998 chk->rec.data.TSN_seq = tsn;
1999 chk->no_fr_allowed = 0;
2000 chk->rec.data.fsn_num = fsn;
2001 chk->rec.data.stream_seq = msg_id;
2002 chk->rec.data.stream_number = strmno;
2003 chk->rec.data.payloadtype = protocol_id;
2004 chk->rec.data.context = stcb->asoc.context;
2005 chk->rec.data.doing_fast_retransmit = 0;
2006 chk->rec.data.rcv_flags = chunk_flags;
2008 chk->send_size = the_len;
2010 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n",
2013 atomic_add_int(&net->ref_count, 1);
2016 /* Set the appropriate TSN mark */
2017 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2018 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2019 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2020 asoc->highest_tsn_inside_nr_map = tsn;
2023 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2024 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2025 asoc->highest_tsn_inside_map = tsn;
2028 /* Now is it complete (i.e. not fragmented)? */
2029 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2031 * Special check for when streams are resetting. We could be
2032 * more smart about this and check the actual stream to see
2033 * if it is not being reset.. that way we would not create a
2034 * HOLB when amongst streams being reset and those not being
2038 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2039 SCTP_TSN_GT(tsn, liste->tsn)) {
2041 * yep its past where we need to reset... go ahead
/* Park the control on pending_reply_queue, sorted by TSN. */
2044 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2046 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2048 struct sctp_queued_to_read *ctlOn, *nctlOn;
2049 unsigned char inserted = 0;
2051 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2052 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2057 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2062 if (inserted == 0) {
2064 * must be put at end, use prevP
2065 * (all setup from loop) to setup
2068 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2071 goto finish_express_del;
2073 if (chunk_flags & SCTP_DATA_UNORDERED) {
2074 /* queue directly into socket buffer */
2075 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2077 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2078 sctp_add_to_readq(stcb->sctp_ep, stcb,
2080 &stcb->sctp_socket->so_rcv, 1,
2081 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2084 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2086 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2094 goto finish_express_del;
2096 /* If we reach here its a reassembly */
2097 need_reasm_check = 1;
2098 SCTPDBG(SCTP_DEBUG_XXX,
2099 "Queue data to stream for reasm control: %p msg_id: %u\n",
2101 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2104 * the assoc is now gone and chk was put onto the reasm
2105 * queue, which has all been freed.
/* Common epilogue: bump cum-ack, statistics and logging. */
2113 /* Here we tidy up things */
2114 if (tsn == (asoc->cumulative_tsn + 1)) {
2115 /* Update cum-ack */
2116 asoc->cumulative_tsn = tsn;
2122 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2124 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2126 SCTP_STAT_INCR(sctps_recvdata);
2127 /* Set it present please */
2128 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2129 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2132 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2133 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2135 /* check the special flag for stream resets */
2136 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2137 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2139 * we have finished working through the backlogged TSN's now
2140 * time to reset streams. 1: call reset function. 2: free
2141 * pending_reply space 3: distribute any chunks in
2142 * pending_reply_queue.
2144 struct sctp_queued_to_read *ctl, *nctl;
2146 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2147 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2148 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2149 SCTP_FREE(liste, SCTP_M_STRESET);
2150 /* sa_ignore FREED_MEMORY */
2151 liste = TAILQ_FIRST(&asoc->resetHead);
2152 if (TAILQ_EMPTY(&asoc->resetHead)) {
2153 /* All can be removed */
2154 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2155 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2156 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2162 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2163 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2167 * if ctl->sinfo_tsn is <= liste->tsn we can
2168 * process it which is the NOT of
2169 * ctl->sinfo_tsn > liste->tsn
2171 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2172 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2179 * Now service re-assembly to pick up anything that has been
2180 * held on reassembly queue?
2182 (void)sctp_deliver_reasm_check(stcb, asoc, strm);
2183 need_reasm_check = 0;
2185 if (need_reasm_check) {
2186 /* Another one waits ? */
2187 (void)sctp_deliver_reasm_check(stcb, asoc, strm);
/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits at the
 * low end of the byte v, i.e. the bit index of the lowest clear bit
 * (8 for v == 0xff, 0 for any even v).  sctp_slide_mapping_arrays()
 * uses it to locate the first missing TSN inside a partially-filled
 * mapping-array byte.
 */
2192 static const int8_t sctp_map_lookup_tab[256] = {
2193 0, 1, 0, 2, 0, 1, 0, 3,
2194 0, 1, 0, 2, 0, 1, 0, 4,
2195 0, 1, 0, 2, 0, 1, 0, 3,
2196 0, 1, 0, 2, 0, 1, 0, 5,
2197 0, 1, 0, 2, 0, 1, 0, 3,
2198 0, 1, 0, 2, 0, 1, 0, 4,
2199 0, 1, 0, 2, 0, 1, 0, 3,
2200 0, 1, 0, 2, 0, 1, 0, 6,
2201 0, 1, 0, 2, 0, 1, 0, 3,
2202 0, 1, 0, 2, 0, 1, 0, 4,
2203 0, 1, 0, 2, 0, 1, 0, 3,
2204 0, 1, 0, 2, 0, 1, 0, 5,
2205 0, 1, 0, 2, 0, 1, 0, 3,
2206 0, 1, 0, 2, 0, 1, 0, 4,
2207 0, 1, 0, 2, 0, 1, 0, 3,
2208 0, 1, 0, 2, 0, 1, 0, 7,
2209 0, 1, 0, 2, 0, 1, 0, 3,
2210 0, 1, 0, 2, 0, 1, 0, 4,
2211 0, 1, 0, 2, 0, 1, 0, 3,
2212 0, 1, 0, 2, 0, 1, 0, 5,
2213 0, 1, 0, 2, 0, 1, 0, 3,
2214 0, 1, 0, 2, 0, 1, 0, 4,
2215 0, 1, 0, 2, 0, 1, 0, 3,
2216 0, 1, 0, 2, 0, 1, 0, 6,
2217 0, 1, 0, 2, 0, 1, 0, 3,
2218 0, 1, 0, 2, 0, 1, 0, 4,
2219 0, 1, 0, 2, 0, 1, 0, 3,
2220 0, 1, 0, 2, 0, 1, 0, 5,
2221 0, 1, 0, 2, 0, 1, 0, 3,
2222 0, 1, 0, 2, 0, 1, 0, 4,
2223 0, 1, 0, 2, 0, 1, 0, 3,
2224 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() -- recompute the cumulative-TSN point from the
 * receive bitmaps and, when enough leading bytes are fully acked, slide
 * mapping_array/nr_mapping_array down so mapping_array_base_tsn advances.
 *
 * NOTE(review): this extract is non-contiguous -- interleaved original lines
 * (braces, else-arms, some declarations such as 'at', 'val', 'i', 'ii',
 * 'clr') are not visible here; comments below describe only what the visible
 * lines establish.
 */
2229 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2232 * Now we also need to check the mapping array in a couple of ways.
2233 * 1) Did we move the cum-ack point?
2235 * When you first glance at this you might think that all entries that
2236 * make up the position of the cum-ack would be in the nr-mapping
2237 * array only.. i.e. things up to the cum-ack are always
2238 * deliverable. Thats true with one exception, when its a fragmented
2239 * message we may not deliver the data until some threshold (or all
2240 * of it) is in place. So we must OR the nr_mapping_array and
2241 * mapping_array to get a true picture of the cum-ack.
2243 struct sctp_association *asoc;
2246 int slide_from, slide_end, lgap, distance;
2247 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state; used later for SCTP_MAP_PREPARE_SLIDE logging. */
2251 old_cumack = asoc->cumulative_tsn;
2252 old_base = asoc->mapping_array_base_tsn;
2253 old_highest = asoc->highest_tsn_inside_map;
2255 * We could probably improve this a small bit by calculating the
2256 * offset of the current cum-ack as the starting point.
/*
 * Phase 1: scan the OR of the two bitmaps byte by byte; sctp_map_lookup_tab
 * counts consecutive set bits once a byte with a 0 bit is found, advancing
 * 'at' to the number of leading contiguously-acked TSNs.
 */
2259 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2260 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2264 /* there is a 0 bit */
2265 at += sctp_map_lookup_tab[val];
2269 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: the new cum-ack must not exceed BOTH highest-TSN trackers.
 * Debug kernels panic (line 2274, under INVARIANTS -- conditional compilation
 * not visible in this extract); production prints, dumps the map, and clamps
 * the trackers to the cum-ack instead.
 */
2271 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2272 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2274 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2275 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2277 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2278 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2279 sctp_print_mapping_array(asoc);
2280 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2281 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2283 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2284 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the two per-bitmap highest-TSN trackers. */
2287 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2288 highest_tsn = asoc->highest_tsn_inside_nr_map;
2290 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Phase 2a: everything up to the highest TSN is acked (and at least one
 * whole byte, at >= 8) -- wipe the used portion of both bitmaps and rebase.
 */
2292 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2293 /* The complete array was completed by a single FR */
2294 /* highest becomes the cum-ack */
2302 /* clear the array */
2303 clr = ((at + 7) >> 3);
2304 if (clr > asoc->mapping_array_size) {
2305 clr = asoc->mapping_array_size;
2307 memset(asoc->mapping_array, 0, clr);
2308 memset(asoc->nr_mapping_array, 0, clr);
/* Debug-only verification that the clear left no stray bits
 * (presumably under INVARIANTS; the #ifdef is not visible here). */
2310 for (i = 0; i < asoc->mapping_array_size; i++) {
2311 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2312 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2313 sctp_print_mapping_array(asoc);
2317 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2318 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Phase 2b: at least one full byte is acked but data remains above the
 * cum-ack -- slide the still-needed bytes down to the front of the arrays.
 */
2319 } else if (at >= 8) {
2320 /* we can slide the mapping array down */
2321 /* slide_from holds where we hit the first NON 0xff byte */
2324 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn from the array base; slide_end = its byte. */
2327 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2328 slide_end = (lgap >> 3);
2329 if (slide_end < slide_from) {
2330 sctp_print_mapping_array(asoc);
2332 panic("impossible slide");
2334 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2335 lgap, slide_end, slide_from, at);
2339 if (slide_end > asoc->mapping_array_size) {
2341 panic("would overrun buffer");
2343 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2344 asoc->mapping_array_size, slide_end);
2345 slide_end = asoc->mapping_array_size;
2348 distance = (slide_end - slide_from) + 1;
2349 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2350 sctp_log_map(old_base, old_cumack, old_highest,
2351 SCTP_MAP_PREPARE_SLIDE);
2352 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2353 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/* If the move would not fit, skip the slide and wait for more data
 * (second half of this condition is not visible in this extract). */
2355 if (distance + slide_from > asoc->mapping_array_size ||
2358 * Here we do NOT slide forward the array so that
2359 * hopefully when more data comes in to fill it up
2360 * we will be able to slide it forward. Really I
2361 * don't think this should happen :-0
2364 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2365 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2366 (uint32_t) asoc->mapping_array_size,
2367 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes to the front, zero the vacated tail. */
2372 for (ii = 0; ii < distance; ii++) {
2373 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2374 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2377 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2378 asoc->mapping_array[ii] = 0;
2379 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "empty map" invariant (highest == base - 1) intact for each
 * tracker by advancing it with the base when it was exactly one behind.
 */
2381 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2382 asoc->highest_tsn_inside_map += (slide_from << 3);
2384 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2385 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2387 asoc->mapping_array_base_tsn += (slide_from << 3);
2388 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2389 sctp_log_map(asoc->mapping_array_base_tsn,
2390 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2391 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() -- decide whether to emit a SACK immediately or to
 * (re)start the delayed-ack timer, based on gap state, duplicate TSNs,
 * the delayed-ack setting, and the SACK-frequency packet count.
 *
 * was_a_gap: nonzero if a gap existed before the just-processed packet.
 *
 * NOTE(review): extract is non-contiguous; some braces/else-arms and the
 * declaration of 'is_a_gap' are not visible here.
 */
2398 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2400 struct sctp_association *asoc;
2401 uint32_t highest_tsn;
/* highest_tsn = max of the renegable and non-renegable map trackers. */
2404 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2405 highest_tsn = asoc->highest_tsn_inside_nr_map;
2407 highest_tsn = asoc->highest_tsn_inside_map;
2411 * Now we need to see if we need to queue a sack or just start the
2412 * timer (if allowed).
/*
 * Special case: in SHUTDOWN-SENT we never delay -- cancel any pending
 * delayed-ack timer, re-send SHUTDOWN, and SACK immediately.
 */
2414 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2416 * Ok special case, in SHUTDOWN-SENT case. here we maker
2417 * sure SACK timer is off and instead send a SHUTDOWN and a
2420 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2421 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2422 stcb->sctp_ep, stcb, NULL,
2423 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2425 sctp_send_shutdown(stcb,
2426 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2427 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2431 /* is there a gap now ? */
2432 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2435 * CMT DAC algorithm: increase number of packets received
2438 stcb->asoc.cmt_dac_pkts_rcvd++;
/*
 * Any one of these conditions forces an acknowledgement decision now
 * rather than leaving the existing timer state alone.
 */
2440 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2442 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2444 (stcb->asoc.numduptsns) || /* we have dup's */
2445 (is_a_gap) || /* is still a gap */
2446 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2447 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * CMT DAC exception: with delayed-ack usable and no forced reason
 * (no queued SACK request, no dups, no timer running), delay the ack
 * even though a gap exists.
 */
2450 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2451 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2452 (stcb->asoc.send_sack == 0) &&
2453 (stcb->asoc.numduptsns == 0) &&
2454 (stcb->asoc.delayed_ack) &&
2455 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2458 * CMT DAC algorithm: With CMT, delay acks
2459 * even in the face of
2461 * reordering. Therefore, if acks that do not
2462 * have to be sent because of the above
2463 * reasons, will be delayed. That is, acks
2464 * that would have been sent due to gap
2465 * reports will be delayed with DAC. Start
2466 * the delayed ack timer.
2468 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2469 stcb->sctp_ep, stcb, NULL);
2472 * Ok we must build a SACK since the timer
2473 * is pending, we got our first packet OR
2474 * there are gaps or duplicates.
2476 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2477 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* No forced reason: just make sure the delayed-ack timer is running. */
2480 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2481 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2482 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_process_data() -- walk the chunk region of a received packet,
 * validate each DATA/I-DATA chunk, hand it to sctp_process_a_data_chunk(),
 * and handle any non-data chunk types found in the data region.  On exit,
 * update liveness bookkeeping and kick the SACK state machine.
 *
 * mm/iphlen/offset/length: the received mbuf chain and parse cursor.
 * high_tsn (out): highest TSN accepted from this packet.
 *
 * NOTE(review): extract is non-contiguous; several declarations ('m',
 * 'stop_proc', 'break_flag' init, return statements) and closing braces
 * are not visible here.
 */
2489 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2490 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2491 struct sctp_nets *net, uint32_t * high_tsn)
2493 struct sctp_chunkhdr *ch, chunk_buf;
2494 struct sctp_association *asoc;
2495 int num_chunks = 0; /* number of control chunks processed */
2497 int chk_length, break_flag, last_chunk;
2498 int abort_flag = 0, was_a_gap;
2500 uint32_t highest_tsn;
2503 sctp_set_rwnd(stcb, &stcb->asoc);
2506 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed BEFORE this packet -- sctp_sack_check()
 * needs it at the end to decide on an immediate SACK. */
2508 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2509 highest_tsn = asoc->highest_tsn_inside_nr_map;
2511 highest_tsn = asoc->highest_tsn_inside_map;
2513 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2515 * setup where we got the last DATA packet from for any SACK that
2516 * may need to go out. Don't bump the net. This is done ONLY when a
2517 * chunk is assigned.
2519 asoc->last_data_chunk_from = net;
2522 * Now before we proceed we must figure out if this is a wasted
2523 * cluster... i.e. it is a small packet sent in and yet the driver
2524 * underneath allocated a full cluster for it. If so we must copy it
2525 * to a smaller mbuf and free up the cluster mbuf. This will help
2526 * with cluster starvation. Note for __Panda__ we don't do this
2527 * since it has clusters all the way down to 64 bytes.
2529 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2530 /* we only handle mbufs that are singletons.. not chains */
2531 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2533 /* ok lets see if we can copy the data up */
2536 /* get the pointers and copy */
2537 to = mtod(m, caddr_t *);
2538 from = mtod((*mm), caddr_t *);
2539 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2540 /* copy the length and free up the old */
2541 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2543 /* success, back copy */
2546 /* We are in trouble in the mbuf world .. yikes */
2550 /* get pointer to the first chunk header */
2551 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2552 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2557 * process all DATA chunks...
2559 *high_tsn = asoc->cumulative_tsn;
2561 asoc->data_pkts_seen++;
/* Main chunk-walk loop: advance *offset one chunk at a time. */
2562 while (stop_proc == 0) {
2563 /* validate chunk length */
2564 chk_length = ntohs(ch->chunk_length);
2565 if (length - *offset < chk_length) {
2566 /* all done, mutulated chunk */
/*
 * Mixing DATA and I-DATA against the negotiated mode is a protocol
 * violation -- abort the association either way.
 */
2570 if ((asoc->idata_supported == 1) &&
2571 (ch->chunk_type == SCTP_DATA)) {
2572 struct mbuf *op_err;
2573 char msg[SCTP_DIAG_INFO_LEN];
2575 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2576 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2577 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2578 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2581 if ((asoc->idata_supported == 0) &&
2582 (ch->chunk_type == SCTP_IDATA)) {
2583 struct mbuf *op_err;
2584 char msg[SCTP_DIAG_INFO_LEN];
2586 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2587 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2588 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2589 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* A data chunk: check it is at least header-sized, then process it. */
2592 if ((ch->chunk_type == SCTP_DATA) ||
2593 (ch->chunk_type == SCTP_IDATA)) {
2596 if (ch->chunk_type == SCTP_DATA) {
2597 clen = sizeof(struct sctp_data_chunk);
2599 clen = sizeof(struct sctp_idata_chunk);
2601 if (chk_length < clen) {
2603 * Need to send an abort since we had a
2604 * invalid data chunk.
2606 struct mbuf *op_err;
2607 char msg[SCTP_DIAG_INFO_LEN];
2609 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2611 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2612 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2613 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2616 #ifdef SCTP_AUDITING_ENABLED
2617 sctp_audit_log(0xB1, 0);
/* last_chunk is set when this chunk exactly fills the packet remainder. */
2619 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2624 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2625 chk_length, net, high_tsn, &abort_flag, &break_flag,
2626 last_chunk, ch->chunk_type)) {
2634 * Set because of out of rwnd space and no
2635 * drop rep space left.
2641 /* not a data chunk in the data region */
2642 switch (ch->chunk_type) {
2643 case SCTP_INITIATION:
2644 case SCTP_INITIATION_ACK:
2645 case SCTP_SELECTIVE_ACK:
2646 case SCTP_NR_SELECTIVE_ACK:
2647 case SCTP_HEARTBEAT_REQUEST:
2648 case SCTP_HEARTBEAT_ACK:
2649 case SCTP_ABORT_ASSOCIATION:
2651 case SCTP_SHUTDOWN_ACK:
2652 case SCTP_OPERATION_ERROR:
2653 case SCTP_COOKIE_ECHO:
2654 case SCTP_COOKIE_ACK:
2657 case SCTP_SHUTDOWN_COMPLETE:
2658 case SCTP_AUTHENTICATION:
2659 case SCTP_ASCONF_ACK:
2660 case SCTP_PACKET_DROPPED:
2661 case SCTP_STREAM_RESET:
2662 case SCTP_FORWARD_CUM_TSN:
2666 * Now, what do we do with KNOWN
2667 * chunks that are NOT in the right
2670 * For now, I do nothing but ignore
2671 * them. We may later want to add
2672 * sysctl stuff to switch out and do
2673 * either an ABORT() or possibly
2676 struct mbuf *op_err;
2677 char msg[SCTP_DIAG_INFO_LEN];
2679 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2681 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2682 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Unknown chunk type: RFC upper-bit rules -- 0x40 means "report in an
 * ERROR chunk", 0x80 clear means "stop processing this packet".
 */
2686 /* unknown chunk type, use bit rules */
2687 if (ch->chunk_type & 0x40) {
2688 /* Add a error report to the queue */
2689 struct mbuf *op_err;
2690 struct sctp_gen_error_cause *cause;
2692 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2693 0, M_NOWAIT, 1, MT_DATA);
2694 if (op_err != NULL) {
2695 cause = mtod(op_err, struct sctp_gen_error_cause *);
2696 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2697 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2698 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2699 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2700 if (SCTP_BUF_NEXT(op_err) != NULL) {
2701 sctp_queue_op_err(stcb, op_err);
2703 sctp_m_freem(op_err);
2707 if ((ch->chunk_type & 0x80) == 0) {
2708 /* discard the rest of this packet */
2710 } /* else skip this bad chunk and
2713 } /* switch of chunk type */
/* Advance to the next 4-byte-aligned chunk, or stop at end of packet. */
2715 *offset += SCTP_SIZE32(chk_length);
2716 if ((*offset >= length) || stop_proc) {
2717 /* no more data left in the mbuf chain */
2721 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2722 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2731 * we need to report rwnd overrun drops.
2733 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2737 * Did we get data, if so update the time for auto-close and
2738 * give peer credit for being alive.
2740 SCTP_STAT_INCR(sctps_recvpktwithdata);
2741 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2742 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2743 stcb->asoc.overall_error_count,
2745 SCTP_FROM_SCTP_INDATA,
/* Valid data from the peer resets the association error counter. */
2748 stcb->asoc.overall_error_count = 0;
2749 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2751 /* now service all of the reassm queue if needed */
2752 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2753 /* Assure that we ack right away */
2754 stcb->asoc.send_sack = 1;
2756 /* Start a sack timer or QUEUE a SACK for sending */
2757 sctp_sack_check(stcb, was_a_gap);
/*
 * sctp_process_segment_range() -- mark every sent chunk whose TSN falls in
 * one SACK gap-ack block [last_tsn+frag_strt, last_tsn+frag_end] as acked
 * (MARKED, or NR_ACKED when nr_sacking), maintaining CMT pseudo-cumack
 * trackers, flight size, RTO sampling, and per-stream queue counts.
 *
 * p_tp1 (in/out): resume cursor into the sent_queue across fragments.
 * Returns wake_him (nonzero when an nr-sack freed data, per line 2998).
 *
 * NOTE(review): extract is non-contiguous; the inner while-loop header,
 * some braces, and several statements are not visible here.
 */
2762 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2763 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2765 uint32_t * biggest_newly_acked_tsn,
2766 uint32_t * this_sack_lowest_newack,
2769 struct sctp_tmit_chunk *tp1;
2770 unsigned int theTSN;
2771 int j, wake_him = 0, circled = 0;
2773 /* Recover the tp1 we last saw */
2776 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Outer loop: one iteration per TSN covered by this fragment. */
2778 for (j = frag_strt; j <= frag_end; j++) {
2779 theTSN = j + last_tsn;
2781 if (tp1->rec.data.doing_fast_retransmit)
2785 * CMT: CUCv2 algorithm. For each TSN being
2786 * processed from the sent queue, track the
2787 * next expected pseudo-cumack, or
2788 * rtx_pseudo_cumack, if required. Separate
2789 * cumack trackers for first transmissions,
2790 * and retransmissions.
2792 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2793 (tp1->whoTo->find_pseudo_cumack == 1) &&
2794 (tp1->snd_count == 1)) {
2795 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2796 tp1->whoTo->find_pseudo_cumack = 0;
2798 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2799 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2800 (tp1->snd_count > 1)) {
2801 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2802 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk for this TSN -- ack it (unless already UNSENT). */
2804 if (tp1->rec.data.TSN_seq == theTSN) {
2805 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2807 * must be held until
2810 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2812 * If it is less than RESEND, it is
2813 * now no-longer in flight.
2814 * Higher values may already be set
2815 * via previous Gap Ack Blocks...
2816 * i.e. ACKED or RESEND.
2818 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2819 *biggest_newly_acked_tsn)) {
2820 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2823 * CMT: SFR algo (and HTNA) - set
2824 * saw_newack to 1 for dest being
2825 * newly acked. update
2826 * this_sack_highest_newack if
2829 if (tp1->rec.data.chunk_was_revoked == 0)
2830 tp1->whoTo->saw_newack = 1;
2832 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2833 tp1->whoTo->this_sack_highest_newack)) {
2834 tp1->whoTo->this_sack_highest_newack =
2835 tp1->rec.data.TSN_seq;
2838 * CMT DAC algo: also update
2839 * this_sack_lowest_newack
2841 if (*this_sack_lowest_newack == 0) {
2842 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2843 sctp_log_sack(*this_sack_lowest_newack,
2845 tp1->rec.data.TSN_seq,
2848 SCTP_LOG_TSN_ACKED);
2850 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2853 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2854 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2855 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2856 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2857 * Separate pseudo_cumack trackers for first transmissions and
2860 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2861 if (tp1->rec.data.chunk_was_revoked == 0) {
2862 tp1->whoTo->new_pseudo_cumack = 1;
2864 tp1->whoTo->find_pseudo_cumack = 1;
2866 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2867 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2869 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2870 if (tp1->rec.data.chunk_was_revoked == 0) {
2871 tp1->whoTo->new_pseudo_cumack = 1;
2873 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2875 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2876 sctp_log_sack(*biggest_newly_acked_tsn,
2878 tp1->rec.data.TSN_seq,
2881 SCTP_LOG_TSN_ACKED);
2883 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2884 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2885 tp1->whoTo->flight_size,
2887 (uint32_t) (uintptr_t) tp1->whoTo,
2888 tp1->rec.data.TSN_seq);
/* The chunk leaves flight: shrink per-net and total flight, credit net_ack. */
2890 sctp_flight_size_decrease(tp1);
2891 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2892 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2895 sctp_total_flight_decrease(stcb, tp1);
2897 tp1->whoTo->net_ack += tp1->send_size;
2898 if (tp1->snd_count < 2) {
2900 * True non-retransmited chunk
2902 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample -- only taken here for never-retransmitted chunks
 * (the guarding conditions are not fully visible in this extract). */
2910 sctp_calculate_rto(stcb,
2913 &tp1->sent_rcv_time,
2914 sctp_align_safe_nocopy,
2915 SCTP_RTT_FROM_DATA);
2918 if (tp1->whoTo->rto_needed == 0) {
2919 tp1->whoTo->rto_needed = 1;
2925 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2926 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2927 stcb->asoc.this_sack_highest_gap)) {
2928 stcb->asoc.this_sack_highest_gap =
2929 tp1->rec.data.TSN_seq;
2931 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2932 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2933 #ifdef SCTP_AUDITING_ENABLED
2934 sctp_audit_log(0xB2,
2935 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2940 * All chunks NOT UNSENT fall through here and are marked
2941 * (leave PR-SCTP ones that are to skip alone though)
2943 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2944 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2945 tp1->sent = SCTP_DATAGRAM_MARKED;
2947 if (tp1->rec.data.chunk_was_revoked) {
2948 /* deflate the cwnd */
2949 tp1->whoTo->cwnd -= tp1->book_size;
2950 tp1->rec.data.chunk_was_revoked = 0;
/*
 * NR-SACK path: the TSN is non-renegable, so the data can be freed
 * now; decrement the stream's chunks_on_queues and possibly trigger
 * a pending stream reset.
 */
2952 /* NR Sack code here */
2954 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2955 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2956 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2959 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2962 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2963 (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2964 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2965 stcb->asoc.trigger_reset = 1;
2967 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2973 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2974 sctp_m_freem(tp1->data);
2981 } /* if (tp1->TSN_seq == theTSN) */
/* Past the target TSN: stop scanning for this TSN. */
2982 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
/* Wrap once to the queue head in case fragments arrived out of order. */
2985 tp1 = TAILQ_NEXT(tp1, sctp_next);
2986 if ((tp1 == NULL) && (circled == 0)) {
2988 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2990 } /* end while (tp1) */
2993 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2995 /* In case the fragments were not in order we must reset */
2996 } /* end for (j = fragStart */
2998 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() -- parse the num_seg + num_nr_seg gap-ack blocks
 * of a (NR-)SACK chunk from the mbuf at *offset and process each via
 * sctp_process_segment_range(), keeping a resume cursor (tp1) across
 * well-ordered fragments and restarting it on out-of-order ones.
 *
 * Returns chunk_freed: nonzero when an nr-sack fragment freed data.
 *
 * NOTE(review): extract is non-contiguous; the declarations of 'i',
 * 'chunk_freed', 'non_revocable', 'num_frs' and some braces are not
 * visible here.
 */
3003 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3004 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3005 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3006 int num_seg, int num_nr_seg, int *rto_ok)
3008 struct sctp_gap_ack_block *frag, block;
3009 struct sctp_tmit_chunk *tp1;
3014 uint16_t frag_strt, frag_end, prev_frag_end;
3016 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3020 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3023 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next 4-byte gap block out of the chunk; bail if truncated. */
3025 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3026 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3027 *offset += sizeof(block);
3029 return (chunk_freed);
3031 frag_strt = ntohs(frag->start);
3032 frag_end = ntohs(frag->end);
3034 if (frag_strt > frag_end) {
3035 /* This gap report is malformed, skip it. */
3038 if (frag_strt <= prev_frag_end) {
3039 /* This gap report is not in order, so restart. */
3040 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3042 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3043 *biggest_tsn_acked = last_tsn + frag_end;
/* First num_seg blocks are renegable; the rest are NR (non-revocable --
 * selection of 'non_revocable' happens on lines not visible here). */
3050 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3051 non_revocable, &num_frs, biggest_newly_acked_tsn,
3052 this_sack_lowest_newack, rto_ok)) {
3055 prev_frag_end = frag_end;
3057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3059 sctp_log_fr(*biggest_tsn_acked,
3060 *biggest_newly_acked_tsn,
3061 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3063 return (chunk_freed);
/*
 * sctp_check_for_revoked() -- after processing a SACK, scan the sent_queue
 * above the cum-ack: a chunk still in ACKED state that was not re-acked by
 * this SACK (and is <= biggest_tsn_acked) has been "revoked" by the peer,
 * so it must go back to SENT state and re-enter the flight accounting;
 * a MARKED chunk was re-acked and is promoted to ACKED.
 *
 * NOTE(review): extract is non-contiguous; some braces and the loop's
 * break statements are not visible here.
 */
3067 sctp_check_for_revoked(struct sctp_tcb *stcb,
3068 struct sctp_association *asoc, uint32_t cumack,
3069 uint32_t biggest_tsn_acked)
3071 struct sctp_tmit_chunk *tp1;
3073 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3074 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3076 * ok this guy is either ACK or MARKED. If it is
3077 * ACKED it has been previously acked but not this
3078 * time i.e. revoked. If it is MARKED it was ACK'ed
3081 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3084 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3085 /* it has been revoked */
3086 tp1->sent = SCTP_DATAGRAM_SENT;
3087 tp1->rec.data.chunk_was_revoked = 1;
3089 * We must add this stuff back in to assure
3090 * timers and such get started.
3092 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3093 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3094 tp1->whoTo->flight_size,
3096 (uint32_t) (uintptr_t) tp1->whoTo,
3097 tp1->rec.data.TSN_seq);
/* Revoked data counts as in-flight again. */
3099 sctp_flight_size_increase(tp1);
3100 sctp_total_flight_increase(stcb, tp1);
3102 * We inflate the cwnd to compensate for our
3103 * artificial inflation of the flight_size.
3105 tp1->whoTo->cwnd += tp1->book_size;
3106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3107 sctp_log_sack(asoc->last_acked_seq,
3109 tp1->rec.data.TSN_seq,
3112 SCTP_LOG_TSN_REVOKED);
3114 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3115 /* it has been re-acked in this SACK */
3116 tp1->sent = SCTP_DATAGRAM_ACKED;
/* UNSENT chunks mark the end of the interesting region of the queue. */
3119 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3126 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3127 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3129 struct sctp_tmit_chunk *tp1;
3130 int strike_flag = 0;
3132 int tot_retrans = 0;
3133 uint32_t sending_seq;
3134 struct sctp_nets *net;
3135 int num_dests_sacked = 0;
3138 * select the sending_seq, this is either the next thing ready to be
3139 * sent but not transmitted, OR, the next seq we assign.
3141 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3143 sending_seq = asoc->sending_seq;
3145 sending_seq = tp1->rec.data.TSN_seq;
3148 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3149 if ((asoc->sctp_cmt_on_off > 0) &&
3150 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3151 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3152 if (net->saw_newack)
3156 if (stcb->asoc.prsctp_supported) {
3157 (void)SCTP_GETTIME_TIMEVAL(&now);
3159 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3161 if (tp1->no_fr_allowed) {
3162 /* this one had a timeout or something */
3165 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3166 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3167 sctp_log_fr(biggest_tsn_newly_acked,
3168 tp1->rec.data.TSN_seq,
3170 SCTP_FR_LOG_CHECK_STRIKE);
3172 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3173 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3177 if (stcb->asoc.prsctp_supported) {
3178 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3179 /* Is it expired? */
3180 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3181 /* Yes so drop it */
3182 if (tp1->data != NULL) {
3183 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3184 SCTP_SO_NOT_LOCKED);
3190 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3191 /* we are beyond the tsn in the sack */
3194 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3195 /* either a RESEND, ACKED, or MARKED */
3197 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3198 /* Continue strikin FWD-TSN chunks */
3199 tp1->rec.data.fwd_tsn_cnt++;
3204 * CMT : SFR algo (covers part of DAC and HTNA as well)
3206 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3208 * No new acks were receieved for data sent to this
3209 * dest. Therefore, according to the SFR algo for
3210 * CMT, no data sent to this dest can be marked for
3211 * FR using this SACK.
3214 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3215 tp1->whoTo->this_sack_highest_newack)) {
3217 * CMT: New acks were receieved for data sent to
3218 * this dest. But no new acks were seen for data
3219 * sent after tp1. Therefore, according to the SFR
3220 * algo for CMT, tp1 cannot be marked for FR using
3221 * this SACK. This step covers part of the DAC algo
3222 * and the HTNA algo as well.
3227 * Here we check to see if we were have already done a FR
3228 * and if so we see if the biggest TSN we saw in the sack is
3229 * smaller than the recovery point. If so we don't strike
3230 * the tsn... otherwise we CAN strike the TSN.
3233 * @@@ JRI: Check for CMT if (accum_moved &&
3234 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3237 if (accum_moved && asoc->fast_retran_loss_recovery) {
3239 * Strike the TSN if in fast-recovery and cum-ack
3242 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3243 sctp_log_fr(biggest_tsn_newly_acked,
3244 tp1->rec.data.TSN_seq,
3246 SCTP_FR_LOG_STRIKE_CHUNK);
3248 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3251 if ((asoc->sctp_cmt_on_off > 0) &&
3252 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3254 * CMT DAC algorithm: If SACK flag is set to
3255 * 0, then lowest_newack test will not pass
3256 * because it would have been set to the
3257 * cumack earlier. If not already to be
3258 * rtx'd, If not a mixed sack and if tp1 is
3259 * not between two sacked TSNs, then mark by
3260 * one more. NOTE that we are marking by one
3261 * additional time since the SACK DAC flag
3262 * indicates that two packets have been
3263 * received after this missing TSN.
3265 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3266 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3267 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3268 sctp_log_fr(16 + num_dests_sacked,
3269 tp1->rec.data.TSN_seq,
3271 SCTP_FR_LOG_STRIKE_CHUNK);
3276 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3277 (asoc->sctp_cmt_on_off == 0)) {
3279 * For those that have done a FR we must take
3280 * special consideration if we strike. I.e the
3281 * biggest_newly_acked must be higher than the
3282 * sending_seq at the time we did the FR.
3285 #ifdef SCTP_FR_TO_ALTERNATE
3287 * If FR's go to new networks, then we must only do
3288 * this for singly homed asoc's. However if the FR's
3289 * go to the same network (Armando's work) then its
3290 * ok to FR multiple times.
3298 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3299 tp1->rec.data.fast_retran_tsn)) {
3301 * Strike the TSN, since this ack is
3302 * beyond where things were when we
3305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3306 sctp_log_fr(biggest_tsn_newly_acked,
3307 tp1->rec.data.TSN_seq,
3309 SCTP_FR_LOG_STRIKE_CHUNK);
3311 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3315 if ((asoc->sctp_cmt_on_off > 0) &&
3316 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3318 * CMT DAC algorithm: If
3319 * SACK flag is set to 0,
3320 * then lowest_newack test
3321 * will not pass because it
3322 * would have been set to
3323 * the cumack earlier. If
3324 * not already to be rtx'd,
3325 * If not a mixed sack and
3326 * if tp1 is not between two
3327 * sacked TSNs, then mark by
3328 * one more. NOTE that we
3329 * are marking by one
3330 * additional time since the
3331 * SACK DAC flag indicates
3332 * that two packets have
3333 * been received after this
3336 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3337 (num_dests_sacked == 1) &&
3338 SCTP_TSN_GT(this_sack_lowest_newack,
3339 tp1->rec.data.TSN_seq)) {
3340 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3341 sctp_log_fr(32 + num_dests_sacked,
3342 tp1->rec.data.TSN_seq,
3344 SCTP_FR_LOG_STRIKE_CHUNK);
3346 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3354 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3357 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3358 biggest_tsn_newly_acked)) {
3360 * We don't strike these: This is the HTNA
3361 * algorithm i.e. we don't strike If our TSN is
3362 * larger than the Highest TSN Newly Acked.
3366 /* Strike the TSN */
3367 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3368 sctp_log_fr(biggest_tsn_newly_acked,
3369 tp1->rec.data.TSN_seq,
3371 SCTP_FR_LOG_STRIKE_CHUNK);
3373 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3376 if ((asoc->sctp_cmt_on_off > 0) &&
3377 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3379 * CMT DAC algorithm: If SACK flag is set to
3380 * 0, then lowest_newack test will not pass
3381 * because it would have been set to the
3382 * cumack earlier. If not already to be
3383 * rtx'd, If not a mixed sack and if tp1 is
3384 * not between two sacked TSNs, then mark by
3385 * one more. NOTE that we are marking by one
3386 * additional time since the SACK DAC flag
3387 * indicates that two packets have been
3388 * received after this missing TSN.
3390 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3391 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3393 sctp_log_fr(48 + num_dests_sacked,
3394 tp1->rec.data.TSN_seq,
3396 SCTP_FR_LOG_STRIKE_CHUNK);
3402 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3403 struct sctp_nets *alt;
3405 /* fix counts and things */
3406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3407 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3408 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3410 (uint32_t) (uintptr_t) tp1->whoTo,
3411 tp1->rec.data.TSN_seq);
3414 tp1->whoTo->net_ack++;
3415 sctp_flight_size_decrease(tp1);
3416 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3417 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3421 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3422 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3423 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3425 /* add back to the rwnd */
3426 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3428 /* remove from the total flight */
3429 sctp_total_flight_decrease(stcb, tp1);
3431 if ((stcb->asoc.prsctp_supported) &&
3432 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3434 * Has it been retransmitted tv_sec times? -
3435 * we store the retran count there.
3437 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3438 /* Yes, so drop it */
3439 if (tp1->data != NULL) {
3440 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3441 SCTP_SO_NOT_LOCKED);
3443 /* Make sure to flag we had a FR */
3444 tp1->whoTo->net_ack++;
3449 * SCTP_PRINTF("OK, we are now ready to FR this
3452 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3453 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3457 /* This is a subsequent FR */
3458 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3460 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3461 if (asoc->sctp_cmt_on_off > 0) {
3463 * CMT: Using RTX_SSTHRESH policy for CMT.
3464 * If CMT is being used, then pick dest with
3465 * largest ssthresh for any retransmission.
3467 tp1->no_fr_allowed = 1;
3469 /* sa_ignore NO_NULL_CHK */
3470 if (asoc->sctp_cmt_pf > 0) {
3472 * JRS 5/18/07 - If CMT PF is on,
3473 * use the PF version of
3476 alt = sctp_find_alternate_net(stcb, alt, 2);
3479 * JRS 5/18/07 - If only CMT is on,
3480 * use the CMT version of
3483 /* sa_ignore NO_NULL_CHK */
3484 alt = sctp_find_alternate_net(stcb, alt, 1);
3490 * CUCv2: If a different dest is picked for
3491 * the retransmission, then new
3492 * (rtx-)pseudo_cumack needs to be tracked
3493 * for orig dest. Let CUCv2 track new (rtx-)
3494 * pseudo-cumack always.
3497 tp1->whoTo->find_pseudo_cumack = 1;
3498 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3500 } else {/* CMT is OFF */
3502 #ifdef SCTP_FR_TO_ALTERNATE
3503 /* Can we find an alternate? */
3504 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3507 * default behavior is to NOT retransmit
3508 * FR's to an alternate. Armando Caro's
3509 * paper details why.
3515 tp1->rec.data.doing_fast_retransmit = 1;
3517 /* mark the sending seq for possible subsequent FR's */
3519 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3520 * (uint32_t)tpi->rec.data.TSN_seq);
3522 if (TAILQ_EMPTY(&asoc->send_queue)) {
3524 * If the queue of send is empty then its
3525 * the next sequence number that will be
3526 * assigned so we subtract one from this to
3527 * get the one we last sent.
3529 tp1->rec.data.fast_retran_tsn = sending_seq;
3532 * If there are chunks on the send queue
3533 * (unsent data that has made it from the
3534 * stream queues but not out the door, we
3535 * take the first one (which will have the
3536 * lowest TSN) and subtract one to get the
3539 struct sctp_tmit_chunk *ttt;
3541 ttt = TAILQ_FIRST(&asoc->send_queue);
3542 tp1->rec.data.fast_retran_tsn =
3543 ttt->rec.data.TSN_seq;
3548 * this guy had a RTO calculation pending on
3551 if ((tp1->whoTo != NULL) &&
3552 (tp1->whoTo->rto_needed == 0)) {
3553 tp1->whoTo->rto_needed = 1;
3557 if (alt != tp1->whoTo) {
3558 /* yes, there is an alternate. */
3559 sctp_free_remote_addr(tp1->whoTo);
3560 /* sa_ignore FREED_MEMORY */
3562 atomic_add_int(&alt->ref_count, 1);
/*
 * Try to move the PR-SCTP "advanced peer ack point" forward over sent-queue
 * chunks that are marked skippable (SCTP_FORWARD_TSN_SKIP or
 * SCTP_DATAGRAM_NR_ACKED), returning the chunk the ack point advanced to
 * (for use in a FORWARD-TSN), or NULL when no advance happened.
 *
 * NOTE(review): this region of the file is a lossy extraction -- interior
 * lines (braces, returns, the `now` declaration) are missing and each line
 * carries a stray leading line number.  Comments annotate only what is
 * visible; the fragment is not compilable as shown.
 */
3568 struct sctp_tmit_chunk *
3569 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3570 struct sctp_association *asoc)
3572 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to advance unless the peer negotiated PR-SCTP support. */
3576 if (asoc->prsctp_supported == 0) {
3579 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3580 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3581 tp1->sent != SCTP_DATAGRAM_RESEND &&
3582 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3583 /* no chance to advance, out of here */
3586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3587 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3588 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3589 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3590 asoc->advanced_peer_ack_point,
3591 tp1->rec.data.TSN_seq, 0, 0);
3594 if (!PR_SCTP_ENABLED(tp1->flags)) {
3596 * We can't fwd-tsn past any that are reliable aka
3597 * retransmitted until the asoc fails.
3602 (void)SCTP_GETTIME_TIMEVAL(&now);
3606 * now we got a chunk which is marked for another
3607 * retransmission to a PR-stream but has run out its chances
3608 * already maybe OR has been marked to skip now. Can we skip
3609 * it if its a resend?
/*
 * TTL-policy resends: if the chunk's lifetime (rec.data.timetodrop)
 * has expired, release it via sctp_release_pr_sctp_chunk(); otherwise
 * a live resend blocks any further advance.
 */
3611 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3612 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3614 * Now is this one marked for resend and its time is
3617 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3618 /* Yes so drop it */
3620 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3621 1, SCTP_SO_NOT_LOCKED);
3625 * No, we are done when hit one for resend
3626 * whos time as not expired.
3632 * Ok now if this chunk is marked to drop it we can clean up
3633 * the chunk, advance our peer ack point and we can check
3636 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3637 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3638 /* advance PeerAckPoint goes forward */
3639 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3640 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3642 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3643 /* No update but we do save the chk */
3648 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping: walk the sent queue,
 * tally chunks by their `sent` state (inflight / resend / inbetween /
 * acked / above), and complain (SCTP_PRINTF, or panic under
 * INVARIANTS-style builds -- TODO confirm, the #ifdef lines are missing
 * from this extraction) when chunks still counted as inflight remain.
 *
 * NOTE(review): lossy extraction -- the return-type line, braces,
 * counter increments and the final return are missing; each line carries
 * a stray leading line number.  Annotated as-is.
 */
3658 sctp_fs_audit(struct sctp_association *asoc)
3660 struct sctp_tmit_chunk *chk;
3661 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3665 int entry_flight, entry_cnt;
/* Snapshot the totals on entry for the diagnostic printout below. */
3671 entry_flight = asoc->total_flight;
3672 entry_cnt = asoc->total_flight_count;
/* All sent-queue chunks are PR-SCTP: nothing meaningful to audit. */
3674 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3677 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3678 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3679 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3680 chk->rec.data.TSN_seq,
3684 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3686 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3688 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still "inflight" or "inbetween" here indicates broken accounting. */
3695 if ((inflight > 0) || (inbetween > 0)) {
3697 panic("Flight size-express incorrect? \n");
3699 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3700 entry_flight, entry_cnt);
3702 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3703 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag and, unless it is already acked (or has lost its
 * data), deflate the flight-size accounting and re-mark it
 * SCTP_DATAGRAM_RESEND so it returns to the retransmit path.
 *
 * NOTE(review): lossy extraction -- the return-type line, braces and a
 * `return` after the early-exit logging branch are missing; each line
 * carries a stray leading line number.  Annotated as-is.
 */
3712 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3713 struct sctp_association *asoc,
3714 struct sctp_tmit_chunk *tp1)
3716 tp1->window_probe = 0;
/* Already acked/skipped (or data gone): log and do not move it back. */
3717 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3718 /* TSN's skipped we do NOT move back. */
3719 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3720 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3722 (uint32_t) (uintptr_t) tp1->whoTo,
3723 tp1->rec.data.TSN_seq);
3726 /* First setup this by shrinking flight */
3727 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3728 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3731 sctp_flight_size_decrease(tp1);
3732 sctp_total_flight_decrease(stcb, tp1);
3733 /* Now mark for resend */
3734 tp1->sent = SCTP_DATAGRAM_RESEND;
3735 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3737 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3738 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3739 tp1->whoTo->flight_size,
3741 (uint32_t) (uintptr_t) tp1->whoTo,
3742 tp1->rec.data.TSN_seq);
/*
 * Express (fast-path) SACK handler: processes a SACK carrying only a new
 * cumulative ack -- the signature takes no gap-ack segment parameters,
 * unlike sctp_handle_sack() below.  Visible responsibilities: validate the
 * cumack against sending_seq (abort on protocol violation), drop newly
 * cum-acked chunks from the sent queue with CC/RTO/flight updates, wake
 * the sending socket, clear error counts and PF state on nets, handle
 * window-probe recovery and retransmission timers, progress shutdown, and
 * drive PR-SCTP advanced-peer-ack-point / FORWARD-TSN generation.
 *
 * NOTE(review): this region is a lossy extraction -- interior lines
 * (braces, declarations such as `asoc`, `send_s`, `old_rwnd`, `to_ticks`,
 * `so`, several #else/#endif lines, returns) are missing and every line
 * carries a stray leading line number.  Annotated as-is; not compilable.
 */
3747 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3748 uint32_t rwnd, int *abort_now, int ecne_seen)
3750 struct sctp_nets *net;
3751 struct sctp_association *asoc;
3752 struct sctp_tmit_chunk *tp1, *tp2;
3754 int win_probe_recovery = 0;
3755 int win_probe_recovered = 0;
3756 int j, done_once = 0;
3760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3761 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3762 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3764 SCTP_TCB_LOCK_ASSERT(stcb);
3765 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Ring-buffer log of received cumacks for debugging builds. */
3766 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3767 stcb->asoc.cumack_log_at++;
3768 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3769 stcb->asoc.cumack_log_at = 0;
3773 old_rwnd = asoc->peers_rwnd;
/* Old (already-seen) cumack: discard.  Equal cumack: pure window update. */
3774 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3777 } else if (asoc->last_acked_seq == cumack) {
3778 /* Window update sack */
3779 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3780 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3781 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3782 /* SWS sender side engages */
3783 asoc->peers_rwnd = 0;
3785 if (asoc->peers_rwnd > old_rwnd) {
3790 /* First setup for CC stuff */
3791 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3792 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3793 /* Drag along the window_tsn for cwr's */
3794 net->cwr_window_tsn = cumack;
3796 net->prev_cwnd = net->cwnd;
3801 * CMT: Reset CUC and Fast recovery algo variables before
3804 net->new_pseudo_cumack = 0;
3805 net->will_exit_fast_recovery = 0;
3806 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3807 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/*
 * send_s = one past the highest TSN we have actually sent; a cumack at
 * or beyond it acks data never sent -> protocol violation, abort.
 */
3810 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3811 tp1 = TAILQ_LAST(&asoc->sent_queue,
3812 sctpchunk_listhead);
3813 send_s = tp1->rec.data.TSN_seq + 1;
3815 send_s = asoc->sending_seq;
3817 if (SCTP_TSN_GE(cumack, send_s)) {
3818 struct mbuf *op_err;
3819 char msg[SCTP_DIAG_INFO_LEN];
3823 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3825 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3826 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3827 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3830 asoc->this_sack_highest_gap = cumack;
3831 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3832 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3833 stcb->asoc.overall_error_count,
3835 SCTP_FROM_SCTP_INDATA,
3838 stcb->asoc.overall_error_count = 0;
/* Cumack advanced: free every sent-queue chunk now covered by it. */
3839 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3840 /* process the new consecutive TSN first */
3841 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3842 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3843 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3844 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3846 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3848 * If it is less than ACKED, it is
3849 * now no-longer in flight. Higher
3850 * values may occur during marking
3852 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3853 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3854 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3855 tp1->whoTo->flight_size,
3857 (uint32_t) (uintptr_t) tp1->whoTo,
3858 tp1->rec.data.TSN_seq);
3860 sctp_flight_size_decrease(tp1);
3861 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3862 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3865 /* sa_ignore NO_NULL_CHK */
3866 sctp_total_flight_decrease(stcb, tp1);
3868 tp1->whoTo->net_ack += tp1->send_size;
3869 if (tp1->snd_count < 2) {
3871 * True non-retransmited
3874 tp1->whoTo->net_ack2 +=
3877 /* update RTO too? */
/* First-transmission chunks feed the RTT estimator (Karn's rule). */
3886 sctp_calculate_rto(stcb,
3888 &tp1->sent_rcv_time,
3889 sctp_align_safe_nocopy,
3890 SCTP_RTT_FROM_DATA);
3893 if (tp1->whoTo->rto_needed == 0) {
3894 tp1->whoTo->rto_needed = 1;
3900 * CMT: CUCv2 algorithm. From the
3901 * cumack'd TSNs, for each TSN being
3902 * acked for the first time, set the
3903 * following variables for the
3904 * corresp destination.
3905 * new_pseudo_cumack will trigger a
3907 * find_(rtx_)pseudo_cumack will
3908 * trigger search for the next
3909 * expected (rtx-)pseudo-cumack.
3911 tp1->whoTo->new_pseudo_cumack = 1;
3912 tp1->whoTo->find_pseudo_cumack = 1;
3913 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3915 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3916 /* sa_ignore NO_NULL_CHK */
3917 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3920 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3921 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3923 if (tp1->rec.data.chunk_was_revoked) {
3924 /* deflate the cwnd */
3925 tp1->whoTo->cwnd -= tp1->book_size;
3926 tp1->rec.data.chunk_was_revoked = 0;
3928 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3929 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3930 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3933 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* Stream drained while a reset is pending: arm the reset trigger. */
3937 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3938 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3939 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3940 asoc->trigger_reset = 1;
3942 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3944 /* sa_ignore NO_NULL_CHK */
3945 sctp_free_bufspace(stcb, asoc, tp1, 1);
3946 sctp_m_freem(tp1->data);
3949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3950 sctp_log_sack(asoc->last_acked_seq,
3952 tp1->rec.data.TSN_seq,
3955 SCTP_LOG_FREE_SENT);
3957 asoc->sent_queue_cnt--;
3958 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Send-buffer space was freed above: wake any blocked writer. */
3965 /* sa_ignore NO_NULL_CHK */
3966 if (stcb->sctp_socket) {
3967 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3971 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3972 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3973 /* sa_ignore NO_NULL_CHK */
3974 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3976 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/*
 * Apple/lock-testing path: take the socket lock with the TCB lock
 * dropped, guarding against the assoc being freed meanwhile via the
 * refcnt and the CLOSED_SOCKET state check.
 */
3977 so = SCTP_INP_SO(stcb->sctp_ep);
3978 atomic_add_int(&stcb->asoc.refcnt, 1);
3979 SCTP_TCB_UNLOCK(stcb);
3980 SCTP_SOCKET_LOCK(so, 1);
3981 SCTP_TCB_LOCK(stcb);
3982 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3983 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3984 /* assoc was freed while we were unlocked */
3985 SCTP_SOCKET_UNLOCK(so, 1);
3989 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3990 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3991 SCTP_SOCKET_UNLOCK(so, 1);
3994 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3995 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3999 /* JRS - Use the congestion control given in the CC module */
4000 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4001 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4002 if (net->net_ack2 > 0) {
4004 * Karn's rule applies to clearing error
4005 * count, this is optional.
4007 net->error_count = 0;
4008 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4009 /* addr came good */
4010 net->dest_state |= SCTP_ADDR_REACHABLE;
4011 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4012 0, (void *)net, SCTP_SO_NOT_LOCKED);
4014 if (net == stcb->asoc.primary_destination) {
4015 if (stcb->asoc.alternate) {
4017 * release the alternate,
4020 sctp_free_remote_addr(stcb->asoc.alternate);
4021 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart heartbeat, tell CC. */
4024 if (net->dest_state & SCTP_ADDR_PF) {
4025 net->dest_state &= ~SCTP_ADDR_PF;
4026 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4027 stcb->sctp_ep, stcb, net,
4028 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4029 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4030 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4031 /* Done with this net */
4034 /* restore any doubled timers */
4035 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4036 if (net->RTO < stcb->asoc.minrto) {
4037 net->RTO = stcb->asoc.minrto;
4039 if (net->RTO > stcb->asoc.maxrto) {
4040 net->RTO = stcb->asoc.maxrto;
4044 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4046 asoc->last_acked_seq = cumack;
4048 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4049 /* nothing left in-flight */
4050 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4051 net->flight_size = 0;
4052 net->partial_bytes_acked = 0;
4054 asoc->total_flight = 0;
4055 asoc->total_flight_count = 0;
/* Recompute peer rwnd from the advertised rwnd minus our outstanding data. */
4058 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4059 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4060 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4061 /* SWS sender side engages */
4062 asoc->peers_rwnd = 0;
4064 if (asoc->peers_rwnd > old_rwnd) {
4065 win_probe_recovery = 1;
4067 /* Now assure a timer where data is queued at */
4070 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4073 if (win_probe_recovery && (net->window_probe)) {
4074 win_probe_recovered = 1;
4076 * Find first chunk that was used with window probe
4077 * and clear the sent
4079 /* sa_ignore FREED_MEMORY */
4080 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4081 if (tp1->window_probe) {
4082 /* move back to data send queue */
4083 sctp_window_probe_recovery(stcb, asoc, tp1);
4088 if (net->RTO == 0) {
4089 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4091 to_ticks = MSEC_TO_TICKS(net->RTO);
4093 if (net->flight_size) {
4095 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4096 sctp_timeout_handler, &net->rxt_timer);
4097 if (net->window_probe) {
4098 net->window_probe = 0;
4101 if (net->window_probe) {
4103 * In window probes we must assure a timer
4104 * is still running there
4106 net->window_probe = 0;
4107 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4108 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4109 sctp_timeout_handler, &net->rxt_timer);
4111 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4112 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4114 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * Flight-size audit fallback: if accounting disagrees (sent queue
 * non-empty but zero flight), rebuild flight/retran counts from the
 * per-chunk `sent` states.
 */
4119 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4120 (asoc->sent_queue_retran_cnt == 0) &&
4121 (win_probe_recovered == 0) &&
4124 * huh, this should not happen unless all packets are
4125 * PR-SCTP and marked to skip of course.
4127 if (sctp_fs_audit(asoc)) {
4128 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4129 net->flight_size = 0;
4131 asoc->total_flight = 0;
4132 asoc->total_flight_count = 0;
4133 asoc->sent_queue_retran_cnt = 0;
4134 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4135 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4136 sctp_flight_size_increase(tp1);
4137 sctp_total_flight_increase(stcb, tp1);
4138 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4139 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4146 /**********************************/
4147 /* Now what about shutdown issues */
4148 /**********************************/
4149 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4150 /* nothing left on sendqueue.. consider done */
4152 if ((asoc->stream_queue_cnt == 1) &&
4153 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4154 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4155 (asoc->locked_on_sending)
4157 struct sctp_stream_queue_pending *sp;
4160 * I may be in a state where we got all across.. but
4161 * cannot write more due to a shutdown... we abort
4162 * since the user did not indicate EOR in this case.
4163 * The sp will be cleaned during free of the asoc.
4165 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4167 if ((sp) && (sp->length == 0)) {
4168 /* Let cleanup code purge it */
4169 if (sp->msg_is_complete) {
4170 asoc->stream_queue_cnt--;
4172 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4173 asoc->locked_on_sending = NULL;
4174 asoc->stream_queue_cnt--;
/* Shutdown pending and everything sent: move to SHUTDOWN-SENT (or abort). */
4178 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4179 (asoc->stream_queue_cnt == 0)) {
4180 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4181 /* Need to abort here */
4182 struct mbuf *op_err;
4187 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4188 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4189 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4192 struct sctp_nets *netp;
4194 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4195 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4196 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4198 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4199 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4200 sctp_stop_timers_for_shutdown(stcb);
4201 if (asoc->alternate) {
4202 netp = asoc->alternate;
4204 netp = asoc->primary_destination;
4206 sctp_send_shutdown(stcb, netp);
4207 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4208 stcb->sctp_ep, stcb, netp);
4209 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4210 stcb->sctp_ep, stcb, netp);
/* Peer initiated shutdown and we are drained: answer with SHUTDOWN-ACK. */
4212 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4213 (asoc->stream_queue_cnt == 0)) {
4214 struct sctp_nets *netp;
4216 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4219 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4220 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4221 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4222 sctp_stop_timers_for_shutdown(stcb);
4223 if (asoc->alternate) {
4224 netp = asoc->alternate;
4226 netp = asoc->primary_destination;
4228 sctp_send_shutdown_ack(stcb, netp);
4229 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4230 stcb->sctp_ep, stcb, netp);
4233 /*********************************************/
4234 /* Here we perform PR-SCTP procedures */
4236 /*********************************************/
4237 /* C1. update advancedPeerAckPoint */
4238 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4239 asoc->advanced_peer_ack_point = cumack;
4241 /* PR-Sctp issues need to be addressed too */
4242 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4243 struct sctp_tmit_chunk *lchk;
4244 uint32_t old_adv_peer_ack_point;
4246 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4247 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4248 /* C3. See if we need to send a Fwd-TSN */
4249 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4251 * ISSUE with ECN, see FWD-TSN processing.
4253 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4254 send_forward_tsn(stcb, asoc);
4256 /* try to FR fwd-tsn's that get lost too */
4257 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4258 send_forward_tsn(stcb, asoc);
4263 /* Assure a timer is up */
4264 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4265 stcb->sctp_ep, stcb, lchk->whoTo);
4268 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4269 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4271 stcb->asoc.peers_rwnd,
4272 stcb->asoc.total_flight,
4273 stcb->asoc.total_output_queue_size);
4278 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4279 struct sctp_tcb *stcb,
4280 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4281 int *abort_now, uint8_t flags,
4282 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4284 struct sctp_association *asoc;
4285 struct sctp_tmit_chunk *tp1, *tp2;
4286 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4287 uint16_t wake_him = 0;
4288 uint32_t send_s = 0;
4290 int accum_moved = 0;
4291 int will_exit_fast_recovery = 0;
4292 uint32_t a_rwnd, old_rwnd;
4293 int win_probe_recovery = 0;
4294 int win_probe_recovered = 0;
4295 struct sctp_nets *net = NULL;
4298 uint8_t reneged_all = 0;
4299 uint8_t cmt_dac_flag;
4302 * we take any chance we can to service our queues since we cannot
4303 * get awoken when the socket is read from :<
4306 * Now perform the actual SACK handling: 1) Verify that it is not an
4307 * old sack, if so discard. 2) If there is nothing left in the send
4308 * queue (cum-ack is equal to last acked) then you have a duplicate
4309 * too, update any rwnd change and verify no timers are running.
4310 * then return. 3) Process any new consequtive data i.e. cum-ack
4311 * moved process these first and note that it moved. 4) Process any
4312 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4313 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4314 * sync up flightsizes and things, stop all timers and also check
4315 * for shutdown_pending state. If so then go ahead and send off the
4316 * shutdown. If in shutdown recv, send off the shutdown-ack and
4317 * start that timer, Ret. 9) Strike any non-acked things and do FR
4318 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4319 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4320 * if in shutdown_recv state.
4322 SCTP_TCB_LOCK_ASSERT(stcb);
4324 this_sack_lowest_newack = 0;
4325 SCTP_STAT_INCR(sctps_slowpath_sack);
4327 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4328 #ifdef SCTP_ASOCLOG_OF_TSNS
4329 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4330 stcb->asoc.cumack_log_at++;
4331 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4332 stcb->asoc.cumack_log_at = 0;
4337 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4338 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4339 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4341 old_rwnd = stcb->asoc.peers_rwnd;
4342 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4343 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4344 stcb->asoc.overall_error_count,
4346 SCTP_FROM_SCTP_INDATA,
4349 stcb->asoc.overall_error_count = 0;
4351 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4352 sctp_log_sack(asoc->last_acked_seq,
4359 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4361 uint32_t *dupdata, dblock;
4363 for (i = 0; i < num_dup; i++) {
4364 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4365 sizeof(uint32_t), (uint8_t *) & dblock);
4366 if (dupdata == NULL) {
4369 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4373 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4374 tp1 = TAILQ_LAST(&asoc->sent_queue,
4375 sctpchunk_listhead);
4376 send_s = tp1->rec.data.TSN_seq + 1;
4379 send_s = asoc->sending_seq;
4381 if (SCTP_TSN_GE(cum_ack, send_s)) {
4382 struct mbuf *op_err;
4383 char msg[SCTP_DIAG_INFO_LEN];
4386 * no way, we have not even sent this TSN out yet. Peer is
4387 * hopelessly messed up with us.
4389 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4392 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4393 tp1->rec.data.TSN_seq, (void *)tp1);
4398 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4400 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4401 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4402 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4405 /**********************/
4406 /* 1) check the range */
4407 /**********************/
4408 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4409 /* acking something behind */
4412 /* update the Rwnd of the peer */
4413 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4414 TAILQ_EMPTY(&asoc->send_queue) &&
4415 (asoc->stream_queue_cnt == 0)) {
4416 /* nothing left on send/sent and strmq */
4417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4418 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4419 asoc->peers_rwnd, 0, 0, a_rwnd);
4421 asoc->peers_rwnd = a_rwnd;
4422 if (asoc->sent_queue_retran_cnt) {
4423 asoc->sent_queue_retran_cnt = 0;
4425 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4426 /* SWS sender side engages */
4427 asoc->peers_rwnd = 0;
4429 /* stop any timers */
4430 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4431 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4432 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4433 net->partial_bytes_acked = 0;
4434 net->flight_size = 0;
4436 asoc->total_flight = 0;
4437 asoc->total_flight_count = 0;
4441 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4442 * things. The total byte count acked is tracked in netAckSz AND
4443 * netAck2 is used to track the total bytes acked that are un-
4444 * amibguious and were never retransmitted. We track these on a per
4445 * destination address basis.
4447 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4448 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4449 /* Drag along the window_tsn for cwr's */
4450 net->cwr_window_tsn = cum_ack;
4452 net->prev_cwnd = net->cwnd;
4457 * CMT: Reset CUC and Fast recovery algo variables before
4460 net->new_pseudo_cumack = 0;
4461 net->will_exit_fast_recovery = 0;
4462 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4463 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4466 /* process the new consecutive TSN first */
4467 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4468 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4469 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4471 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4473 * If it is less than ACKED, it is
4474 * now no-longer in flight. Higher
4475 * values may occur during marking
4477 if ((tp1->whoTo->dest_state &
4478 SCTP_ADDR_UNCONFIRMED) &&
4479 (tp1->snd_count < 2)) {
4481 * If there was no retran
4482 * and the address is
4483 * un-confirmed and we sent
4485 * sacked.. its confirmed,
4488 tp1->whoTo->dest_state &=
4489 ~SCTP_ADDR_UNCONFIRMED;
4491 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4492 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4493 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4494 tp1->whoTo->flight_size,
4496 (uint32_t) (uintptr_t) tp1->whoTo,
4497 tp1->rec.data.TSN_seq);
4499 sctp_flight_size_decrease(tp1);
4500 sctp_total_flight_decrease(stcb, tp1);
4501 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4502 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4506 tp1->whoTo->net_ack += tp1->send_size;
4508 /* CMT SFR and DAC algos */
4509 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4510 tp1->whoTo->saw_newack = 1;
4512 if (tp1->snd_count < 2) {
4514 * True non-retransmited
4517 tp1->whoTo->net_ack2 +=
4520 /* update RTO too? */
4524 sctp_calculate_rto(stcb,
4526 &tp1->sent_rcv_time,
4527 sctp_align_safe_nocopy,
4528 SCTP_RTT_FROM_DATA);
4531 if (tp1->whoTo->rto_needed == 0) {
4532 tp1->whoTo->rto_needed = 1;
4538 * CMT: CUCv2 algorithm. From the
4539 * cumack'd TSNs, for each TSN being
4540 * acked for the first time, set the
4541 * following variables for the
4542 * corresp destination.
4543 * new_pseudo_cumack will trigger a
4545 * find_(rtx_)pseudo_cumack will
4546 * trigger search for the next
4547 * expected (rtx-)pseudo-cumack.
4549 tp1->whoTo->new_pseudo_cumack = 1;
4550 tp1->whoTo->find_pseudo_cumack = 1;
4551 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4555 sctp_log_sack(asoc->last_acked_seq,
4557 tp1->rec.data.TSN_seq,
4560 SCTP_LOG_TSN_ACKED);
4562 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4563 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4566 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4567 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4568 #ifdef SCTP_AUDITING_ENABLED
4569 sctp_audit_log(0xB3,
4570 (asoc->sent_queue_retran_cnt & 0x000000ff));
4573 if (tp1->rec.data.chunk_was_revoked) {
4574 /* deflate the cwnd */
4575 tp1->whoTo->cwnd -= tp1->book_size;
4576 tp1->rec.data.chunk_was_revoked = 0;
4578 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4579 tp1->sent = SCTP_DATAGRAM_ACKED;
4586 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4587 /* always set this up to cum-ack */
4588 asoc->this_sack_highest_gap = last_tsn;
4590 if ((num_seg > 0) || (num_nr_seg > 0)) {
4593 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4594 * to be greater than the cumack. Also reset saw_newack to 0
4597 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4598 net->saw_newack = 0;
4599 net->this_sack_highest_newack = last_tsn;
4603 * thisSackHighestGap will increase while handling NEW
4604 * segments this_sack_highest_newack will increase while
4605 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4606 * used for CMT DAC algo. saw_newack will also change.
4608 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4609 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4610 num_seg, num_nr_seg, &rto_ok)) {
4614 * validate the biggest_tsn_acked in the gap acks if strict
4615 * adherence is wanted.
4617 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4619 * peer is either confused or we are under attack.
4622 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4623 biggest_tsn_acked, send_s);
4627 /*******************************************/
4628 /* cancel ALL T3-send timer if accum moved */
4629 /*******************************************/
4630 if (asoc->sctp_cmt_on_off > 0) {
4631 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4632 if (net->new_pseudo_cumack)
4633 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4635 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4640 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4641 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4642 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4646 /********************************************/
4647 /* drop the acked chunks from the sentqueue */
4648 /********************************************/
4649 asoc->last_acked_seq = cum_ack;
4651 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4652 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4655 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4656 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4657 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4660 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4664 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4665 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4666 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4667 asoc->trigger_reset = 1;
4669 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4670 if (PR_SCTP_ENABLED(tp1->flags)) {
4671 if (asoc->pr_sctp_cnt != 0)
4672 asoc->pr_sctp_cnt--;
4674 asoc->sent_queue_cnt--;
4676 /* sa_ignore NO_NULL_CHK */
4677 sctp_free_bufspace(stcb, asoc, tp1, 1);
4678 sctp_m_freem(tp1->data);
4680 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4681 asoc->sent_queue_cnt_removeable--;
4684 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4685 sctp_log_sack(asoc->last_acked_seq,
4687 tp1->rec.data.TSN_seq,
4690 SCTP_LOG_FREE_SENT);
4692 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4695 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4697 panic("Warning flight size is positive and should be 0");
4699 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4700 asoc->total_flight);
4702 asoc->total_flight = 0;
4704 /* sa_ignore NO_NULL_CHK */
4705 if ((wake_him) && (stcb->sctp_socket)) {
4706 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4710 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4711 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4712 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4714 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4715 so = SCTP_INP_SO(stcb->sctp_ep);
4716 atomic_add_int(&stcb->asoc.refcnt, 1);
4717 SCTP_TCB_UNLOCK(stcb);
4718 SCTP_SOCKET_LOCK(so, 1);
4719 SCTP_TCB_LOCK(stcb);
4720 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4721 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4722 /* assoc was freed while we were unlocked */
4723 SCTP_SOCKET_UNLOCK(so, 1);
4727 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4728 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4729 SCTP_SOCKET_UNLOCK(so, 1);
4732 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4733 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4737 if (asoc->fast_retran_loss_recovery && accum_moved) {
4738 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4739 /* Setup so we will exit RFC2582 fast recovery */
4740 will_exit_fast_recovery = 1;
4744 * Check for revoked fragments:
4746 * if Previous sack - Had no frags then we can't have any revoked if
4747 * Previous sack - Had frag's then - If we now have frags aka
4748 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4749 * some of them. else - The peer revoked all ACKED fragments, since
4750 * we had some before and now we have NONE.
4754 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4755 asoc->saw_sack_with_frags = 1;
4756 } else if (asoc->saw_sack_with_frags) {
4757 int cnt_revoked = 0;
4759 /* Peer revoked all dg's marked or acked */
4760 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4761 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4762 tp1->sent = SCTP_DATAGRAM_SENT;
4763 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4764 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4765 tp1->whoTo->flight_size,
4767 (uint32_t) (uintptr_t) tp1->whoTo,
4768 tp1->rec.data.TSN_seq);
4770 sctp_flight_size_increase(tp1);
4771 sctp_total_flight_increase(stcb, tp1);
4772 tp1->rec.data.chunk_was_revoked = 1;
4774 * To ensure that this increase in
4775 * flightsize, which is artificial, does not
4776 * throttle the sender, we also increase the
4777 * cwnd artificially.
4779 tp1->whoTo->cwnd += tp1->book_size;
4786 asoc->saw_sack_with_frags = 0;
4789 asoc->saw_sack_with_nr_frags = 1;
4791 asoc->saw_sack_with_nr_frags = 0;
4793 /* JRS - Use the congestion control given in the CC module */
4794 if (ecne_seen == 0) {
4795 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796 if (net->net_ack2 > 0) {
4798 * Karn's rule applies to clearing error
4799 * count, this is optional.
4801 net->error_count = 0;
4802 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4803 /* addr came good */
4804 net->dest_state |= SCTP_ADDR_REACHABLE;
4805 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4806 0, (void *)net, SCTP_SO_NOT_LOCKED);
4808 if (net == stcb->asoc.primary_destination) {
4809 if (stcb->asoc.alternate) {
4811 * release the alternate,
4814 sctp_free_remote_addr(stcb->asoc.alternate);
4815 stcb->asoc.alternate = NULL;
4818 if (net->dest_state & SCTP_ADDR_PF) {
4819 net->dest_state &= ~SCTP_ADDR_PF;
4820 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4821 stcb->sctp_ep, stcb, net,
4822 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4823 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4824 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4825 /* Done with this net */
4828 /* restore any doubled timers */
4829 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4830 if (net->RTO < stcb->asoc.minrto) {
4831 net->RTO = stcb->asoc.minrto;
4833 if (net->RTO > stcb->asoc.maxrto) {
4834 net->RTO = stcb->asoc.maxrto;
4838 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4840 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4841 /* nothing left in-flight */
4842 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4843 /* stop all timers */
4844 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4846 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4847 net->flight_size = 0;
4848 net->partial_bytes_acked = 0;
4850 asoc->total_flight = 0;
4851 asoc->total_flight_count = 0;
4853 /**********************************/
4854 /* Now what about shutdown issues */
4855 /**********************************/
4856 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4857 /* nothing left on sendqueue.. consider done */
4858 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4859 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4860 asoc->peers_rwnd, 0, 0, a_rwnd);
4862 asoc->peers_rwnd = a_rwnd;
4863 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4864 /* SWS sender side engages */
4865 asoc->peers_rwnd = 0;
4868 if ((asoc->stream_queue_cnt == 1) &&
4869 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4870 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4871 (asoc->locked_on_sending)
4873 struct sctp_stream_queue_pending *sp;
4876 * I may be in a state where we got all across.. but
4877 * cannot write more due to a shutdown... we abort
4878 * since the user did not indicate EOR in this case.
4880 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4882 if ((sp) && (sp->length == 0)) {
4883 asoc->locked_on_sending = NULL;
4884 if (sp->msg_is_complete) {
4885 asoc->stream_queue_cnt--;
4887 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4888 asoc->stream_queue_cnt--;
4892 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4893 (asoc->stream_queue_cnt == 0)) {
4894 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4895 /* Need to abort here */
4896 struct mbuf *op_err;
4901 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4902 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4903 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4906 struct sctp_nets *netp;
4908 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4909 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4910 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4912 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4913 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4914 sctp_stop_timers_for_shutdown(stcb);
4915 if (asoc->alternate) {
4916 netp = asoc->alternate;
4918 netp = asoc->primary_destination;
4920 sctp_send_shutdown(stcb, netp);
4921 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4922 stcb->sctp_ep, stcb, netp);
4923 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4924 stcb->sctp_ep, stcb, netp);
4927 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4928 (asoc->stream_queue_cnt == 0)) {
4929 struct sctp_nets *netp;
4931 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4934 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4935 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4936 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4937 sctp_stop_timers_for_shutdown(stcb);
4938 if (asoc->alternate) {
4939 netp = asoc->alternate;
4941 netp = asoc->primary_destination;
4943 sctp_send_shutdown_ack(stcb, netp);
4944 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4945 stcb->sctp_ep, stcb, netp);
4950 * Now here we are going to recycle net_ack for a different use...
4953 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4958 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4959 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4960 * automatically ensure that.
4962 if ((asoc->sctp_cmt_on_off > 0) &&
4963 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4964 (cmt_dac_flag == 0)) {
4965 this_sack_lowest_newack = cum_ack;
4967 if ((num_seg > 0) || (num_nr_seg > 0)) {
4968 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4969 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4971 /* JRS - Use the congestion control given in the CC module */
4972 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4974 /* Now are we exiting loss recovery ? */
4975 if (will_exit_fast_recovery) {
4976 /* Ok, we must exit fast recovery */
4977 asoc->fast_retran_loss_recovery = 0;
4979 if ((asoc->sat_t3_loss_recovery) &&
4980 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4981 /* end satellite t3 loss recovery */
4982 asoc->sat_t3_loss_recovery = 0;
4987 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4988 if (net->will_exit_fast_recovery) {
4989 /* Ok, we must exit fast recovery */
4990 net->fast_retran_loss_recovery = 0;
4994 /* Adjust and set the new rwnd value */
4995 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4996 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4997 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4999 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5000 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5001 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5002 /* SWS sender side engages */
5003 asoc->peers_rwnd = 0;
5005 if (asoc->peers_rwnd > old_rwnd) {
5006 win_probe_recovery = 1;
5009 * Now we must setup so we have a timer up for anyone with
5015 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5016 if (win_probe_recovery && (net->window_probe)) {
5017 win_probe_recovered = 1;
5019 * Find first chunk that was used with
5020 * window probe and clear the event. Put
5021 * it back into the send queue as if has
5024 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5025 if (tp1->window_probe) {
5026 sctp_window_probe_recovery(stcb, asoc, tp1);
5031 if (net->flight_size) {
5033 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5034 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5035 stcb->sctp_ep, stcb, net);
5037 if (net->window_probe) {
5038 net->window_probe = 0;
5041 if (net->window_probe) {
5043 * In window probes we must assure a timer
5044 * is still running there
5046 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5047 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5048 stcb->sctp_ep, stcb, net);
5051 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5052 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5054 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5059 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5060 (asoc->sent_queue_retran_cnt == 0) &&
5061 (win_probe_recovered == 0) &&
5064 * huh, this should not happen unless all packets are
5065 * PR-SCTP and marked to skip of course.
5067 if (sctp_fs_audit(asoc)) {
5068 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5069 net->flight_size = 0;
5071 asoc->total_flight = 0;
5072 asoc->total_flight_count = 0;
5073 asoc->sent_queue_retran_cnt = 0;
5074 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5075 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5076 sctp_flight_size_increase(tp1);
5077 sctp_total_flight_increase(stcb, tp1);
5078 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5079 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5086 /*********************************************/
5087 /* Here we perform PR-SCTP procedures */
5089 /*********************************************/
5090 /* C1. update advancedPeerAckPoint */
5091 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5092 asoc->advanced_peer_ack_point = cum_ack;
5094 /* C2. try to further move advancedPeerAckPoint ahead */
5095 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5096 struct sctp_tmit_chunk *lchk;
5097 uint32_t old_adv_peer_ack_point;
5099 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5100 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5101 /* C3. See if we need to send a Fwd-TSN */
5102 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5104 * ISSUE with ECN, see FWD-TSN processing.
5106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5107 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5108 0xee, cum_ack, asoc->advanced_peer_ack_point,
5109 old_adv_peer_ack_point);
5111 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5112 send_forward_tsn(stcb, asoc);
5114 /* try to FR fwd-tsn's that get lost too */
5115 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5116 send_forward_tsn(stcb, asoc);
5121 /* Assure a timer is up */
5122 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5123 stcb->sctp_ep, stcb, lchk->whoTo);
5126 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5127 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5129 stcb->asoc.peers_rwnd,
5130 stcb->asoc.total_flight,
5131 stcb->asoc.total_output_queue_size);
/*
 * Handle the cumulative TSN ack carried in a received SHUTDOWN chunk.
 * A SHUTDOWN acknowledges data like a SACK with no gap-ack blocks but
 * carries no receiver-window update, so an a_rwnd is synthesized that
 * leaves the peer's advertised window effectively unchanged, and the
 * express (no-gap) SACK handler is reused.
 * NOTE(review): this listing appears sampled; the return type line and
 * the closing brace of this function are not visible here.
 */
5136 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5139 uint32_t cum_ack, a_rwnd;
5141 cum_ack = ntohl(cp->cumulative_tsn_ack);
5142 /* Arrange so a_rwnd does NOT change */
/*
 * peers_rwnd was derived earlier as (advertised window - in-flight
 * bytes - per-chunk overhead), so adding total_flight back approximately
 * reconstructs the last advertised window — TODO confirm overhead term.
 */
5143 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5145 /* Now call the express sack handling */
/* Final 0 argument: ecne_seen = 0 — no ECN echo with a SHUTDOWN. */
5146 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * After a FORWARD-TSN has advanced the stream's delivery point, flush
 * the per-stream reorder queue in two passes:
 *   pass 1 - deliver (or count down) every queued message whose SSN is
 *            at or before the new last_sequence_delivered point;
 *   pass 2 - continue delivering any further messages that are now
 *            exactly in sequence.
 * Complete (unfragmented) messages go straight to the socket read
 * queue; messages with a first fragment seen are handed to
 * sctp_deliver_reasm_check() instead.
 * NOTE(review): this listing appears sampled; the opening brace and
 * several control-flow lines are not visible here, so the commentary
 * below is limited to what the visible lines establish.
 */
5150 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5151 struct sctp_stream_in *strmin)
5153 struct sctp_queued_to_read *ctl, *nctl;
5154 struct sctp_association *asoc;
/* old: 1 iff I-DATA (large message IDs) is in use — selects the SSN/MID
 * comparison width in SCTP_MSGID_GE/GT below. */
5156 int need_reasm_check = 0, old;
/* tt: the delivery point we flush up to (and later restore from). */
5159 tt = strmin->last_sequence_delivered;
5160 if (asoc->idata_supported) {
5166 * First deliver anything prior to and including the stream no that
/* Pass 1: everything at or before tt is now deliverable. SAFE variant
 * because entries are unlinked from the queue while iterating. */
5169 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5170 if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5171 /* this is deliverable now */
5172 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unlink from whichever stream queue the entry is on. */
5173 if (ctl->on_strm_q) {
5174 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5175 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5176 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5177 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5180 panic("strmin: %p ctl: %p unknown %d",
5181 strmin, ctl, ctl->on_strm_q);
5186 /* subtract pending on streams */
5187 asoc->size_on_all_streams -= ctl->length;
5188 sctp_ucount_decr(asoc->cnt_on_all_streams);
5189 /* deliver it to at least the delivery-q */
5190 if (stcb->sctp_socket) {
/* Skipped (forward-tsn'd) data must not be revoked later. */
5191 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5192 sctp_add_to_readq(stcb->sctp_ep, stcb,
5194 &stcb->sctp_socket->so_rcv,
5195 1, SCTP_READ_LOCK_HELD,
5196 SCTP_SO_NOT_LOCKED);
5199 /* Its a fragmented message */
5200 if (ctl->first_frag_seen) {
5202 * Make it so this is next to
5203 * deliver, we restore later
/* Rewind the delivery point one SSN so the reassembly check
 * below treats this partial message as next in line. */
5205 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5206 need_reasm_check = 1;
5211 /* no more delivery now. */
/* Run the reassembly delivery check queued up by pass 1, then undo the
 * temporary rewind of last_sequence_delivered unless delivery advanced
 * past the saved point on its own. */
5215 if (need_reasm_check) {
5218 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5219 if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5220 /* Restore the next to deliver unless we are ahead */
5221 strmin->last_sequence_delivered = tt;
5224 /* Left the front Partial one on */
5227 need_reasm_check = 0;
5230 * now we must deliver things in queue the normal way if any are
/* Pass 2: walk forward delivering consecutive SSNs (tt tracks the next
 * expected sequence number and is re-read after each delivery). */
5233 tt = strmin->last_sequence_delivered + 1;
5234 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5235 if (tt == ctl->sinfo_ssn) {
5236 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5237 /* this is deliverable now */
5238 if (ctl->on_strm_q) {
5239 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5240 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5241 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5242 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5245 panic("strmin: %p ctl: %p unknown %d",
5246 strmin, ctl, ctl->on_strm_q);
5251 /* subtract pending on streams */
5252 asoc->size_on_all_streams -= ctl->length;
5253 sctp_ucount_decr(asoc->cnt_on_all_streams);
5254 /* deliver it to at least the delivery-q */
/* Unlike pass 1, advancing the delivery point here is for real. */
5255 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5256 if (stcb->sctp_socket) {
5257 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5258 sctp_add_to_readq(stcb->sctp_ep, stcb,
5260 &stcb->sctp_socket->so_rcv, 1,
5261 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5264 tt = strmin->last_sequence_delivered + 1;
5266 /* Its a fragmented message */
5267 if (ctl->first_frag_seen) {
5269 * Make it so this is next to
5272 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5273 need_reasm_check = 1;
/* Final reassembly check for a partial message found in pass 2;
 * result deliberately ignored here. */
5281 if (need_reasm_check) {
5282 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
/*
 * Drop the partially-reassembled message identified by (stream, seq)
 * that a FORWARD-TSN instructed us to skip: free every chunk hanging
 * on its reassembly list, unlink the control entry from the stream's
 * in-queue, and release the control itself unless it has already been
 * placed on the socket read queue (in which case the read side owns it).
 * NOTE(review): this listing appears sampled; the opening brace, the
 * early-return body for the NULL case, and the closing braces are not
 * visible here.
 */
5287 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5288 struct sctp_association *asoc,
5289 uint16_t stream, uint32_t seq)
5291 struct sctp_queued_to_read *control;
5292 struct sctp_stream_in *strm;
5293 struct sctp_tmit_chunk *chk, *nchk;
5296 * For now large messages held on the stream reasm that are complete
5297 * will be tossed too. We could in theory do more work to spin
5298 * through and stop after dumping one msg aka seeing the start of a
5299 * new msg at the head, and call the delivery function... to see if
5300 * it can be delivered... But for now we just dump everything on the
5303 strm = &asoc->strmin[stream];
/* Nothing queued for this (stream, seq): nothing to flush. */
5304 control = find_reasm_entry(strm, (uint32_t) seq, 0, 0);
5305 if (control == NULL) {
/* Free every chunk on the reassembly list, keeping the association's
 * reassembly-queue byte/chunk accounting in step. */
5309 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5310 /* Purge hanging chunks */
5311 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5312 asoc->size_on_reasm_queue -= chk->send_size;
5313 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5315 sctp_m_freem(chk->data);
5318 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Detach the control from the stream queue; only free it (and its
 * remote-address ref and data mbufs) if the read queue does not
 * still reference it. */
5320 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5321 if (control->on_read_q == 0) {
5322 sctp_free_remote_addr(control->whoFrom);
5323 if (control->data) {
5324 sctp_m_freem(control->data);
5325 control->data = NULL;
5327 sctp_free_a_readq(stcb, control);
5333 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5334 struct sctp_forward_tsn_chunk *fwd,
5335 int *abort_flag, struct mbuf *m, int offset)
5337 /* The pr-sctp fwd tsn */
5339 * here we will perform all the data receiver side steps for
5340 * processing FwdTSN, as required in by pr-sctp draft:
5342 * Assume we get FwdTSN(x):
5344 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5345 * others we have 3) examine and update re-ordering queue on
5346 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5347 * report where we are.
5349 struct sctp_association *asoc;
5350 uint32_t new_cum_tsn, gap;
5351 unsigned int i, fwd_sz, m_size;
5353 struct sctp_stream_in *strm;
5354 struct sctp_queued_to_read *ctl, *sv;
5357 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5358 SCTPDBG(SCTP_DEBUG_INDATA1,
5359 "Bad size too small/big fwd-tsn\n");
5362 m_size = (stcb->asoc.mapping_array_size << 3);
5363 /*************************************************************/
5364 /* 1. Here we update local cumTSN and shift the bitmap array */
5365 /*************************************************************/
5366 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5368 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5369 /* Already got there ... */
5373 * now we know the new TSN is more advanced, let's find the actual
5376 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5377 asoc->cumulative_tsn = new_cum_tsn;
5378 if (gap >= m_size) {
5379 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5380 struct mbuf *op_err;
5381 char msg[SCTP_DIAG_INFO_LEN];
5384 * out of range (of single byte chunks in the rwnd I
5385 * give out). This must be an attacker.
5388 snprintf(msg, sizeof(msg),
5389 "New cum ack %8.8x too high, highest TSN %8.8x",
5390 new_cum_tsn, asoc->highest_tsn_inside_map);
5391 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5392 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5393 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5396 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5398 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5399 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5400 asoc->highest_tsn_inside_map = new_cum_tsn;
5402 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5403 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5406 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5409 SCTP_TCB_LOCK_ASSERT(stcb);
5410 for (i = 0; i <= gap; i++) {
5411 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5412 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5413 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5414 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5415 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5420 /*************************************************************/
5421 /* 2. Clear up re-assembly queue */
5422 /*************************************************************/
5424 /* This is now done as part of clearing up the stream/seq */
5426 /*******************************************************/
5427 /* 3. Update the PR-stream re-ordering queues and fix */
5428 /* delivery issues as needed. */
5429 /*******************************************************/
5430 fwd_sz -= sizeof(*fwd);
5433 unsigned int num_str;
5437 struct sctp_strseq *stseq, strseqbuf;
5438 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5440 offset += sizeof(*fwd);
5442 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5443 if (asoc->idata_supported) {
5444 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5447 num_str = fwd_sz / sizeof(struct sctp_strseq);
5450 for (i = 0; i < num_str; i++) {
5451 if (asoc->idata_supported) {
5452 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5453 sizeof(struct sctp_strseq_mid),
5454 (uint8_t *) & strseqbuf_m);
5455 offset += sizeof(struct sctp_strseq_mid);
5456 if (stseq_m == NULL) {
5459 stream = ntohs(stseq_m->stream);
5460 sequence = ntohl(stseq_m->msg_id);
5462 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5463 sizeof(struct sctp_strseq),
5464 (uint8_t *) & strseqbuf);
5465 offset += sizeof(struct sctp_strseq);
5466 if (stseq == NULL) {
5469 stream = ntohs(stseq->stream);
5470 sequence = (uint32_t) ntohs(stseq->sequence);
5477 * Ok we now look for the stream/seq on the read
5478 * queue where its not all delivered. If we find it
5479 * we transmute the read entry into a PDI_ABORTED.
5481 if (stream >= asoc->streamincnt) {
5482 /* screwed up streams, stop! */
5485 if ((asoc->str_of_pdapi == stream) &&
5486 (asoc->ssn_of_pdapi == sequence)) {
5488 * If this is the one we were partially
5489 * delivering now then we no longer are.
5490 * Note this will change with the reassembly
5493 asoc->fragmented_delivery_inprogress = 0;
5495 strm = &asoc->strmin[stream];
5496 sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence);
5497 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5498 if ((ctl->sinfo_stream == stream) &&
5499 (ctl->sinfo_ssn == sequence)) {
5500 str_seq = (stream << 16) | (0x0000ffff & sequence);
5501 ctl->pdapi_aborted = 1;
5502 sv = stcb->asoc.control_pdapi;
5504 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5505 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5506 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5507 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5509 } else if (ctl->on_strm_q) {
5510 panic("strm: %p ctl: %p unknown %d",
5511 strm, ctl, ctl->on_strm_q);
5515 stcb->asoc.control_pdapi = ctl;
5516 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5518 SCTP_PARTIAL_DELIVERY_ABORTED,
5520 SCTP_SO_NOT_LOCKED);
5521 stcb->asoc.control_pdapi = sv;
5523 } else if ((ctl->sinfo_stream == stream) &&
5524 SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5525 /* We are past our victim SSN */
5529 if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5530 /* Update the sequence number */
5531 strm->last_sequence_delivered = sequence;
5533 /* now kick the stream the new way */
5534 /* sa_ignore NO_NULL_CHK */
5535 sctp_kick_prsctp_reorder_queue(stcb, strm);
5537 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5540 * Now slide thing forward.
5542 sctp_slide_mapping_arrays(stcb);