2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
54 * NOTES: On the outbound side of things I need to check the sack timer to
55 * see if I should generate a sack into the chunk queue (if I have data to
56 * send, that is) and will be sending it ... for bundling.
58 * The callback in sctp_usrreq.c will get called when the socket is read from.
59 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: merge the data of a reassembly chunk (chk) onto a
 * read-queue control entry; defined later in this file.
 * NOTE(review): the storage-class/return-type line of this prototype is
 * not visible in this excerpt (source lines are elided).
 */
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64 struct sctp_stream_in *strm,
65 struct sctp_tcb *stcb,
66 struct sctp_association *asoc,
67 struct sctp_tmit_chunk *chk);
/*
 * Recompute and cache the association's advertised receive window:
 * delegates the calculation to sctp_calc_rwnd() and stores the result
 * in asoc->my_rwnd.  (Return-type line and braces elided in this view.)
 */
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 /* Calculate what the rwnd would be */
/*
 * Compute the receive window to advertise to the peer, based on socket
 * receive-buffer space minus data still held on the reassembly and
 * per-stream queues (plus per-chunk mbuf overhead, MSIZE each) and the
 * bookkeeping overhead in my_rwnd_control_len.
 * NOTE(review): this excerpt is elided — the declaration of `calc`, the
 * early return for a NULL socket, several else branches and the final
 * return are not visible here.
 */
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
83 * This is really set wrong with respect to a 1-2-m socket. Since
84 * the sb_cc is the count that everyone as put up. When we re-write
85 * sctp_soreceive then we will fix this so that ONLY this
86 * associations data is taken into account.
/* No socket: presumably return 0/bail out — body elided from this view. */
88 if (stcb->sctp_socket == NULL) {
/* Nothing buffered anywhere: grant the full window. */
91 if (stcb->asoc.sb_cc == 0 &&
92 asoc->size_on_reasm_queue == 0 &&
93 asoc->size_on_all_streams == 0) {
94 /* Full rwnd granted */
95 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 /* get actual space */
99 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
101 * take out what has NOT been put on socket queue and we yet hold
/* Subtract reassembly-queue bytes plus MSIZE overhead per queued chunk. */
104 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 asoc->cnt_on_reasm_queue * MSIZE));
/* Same for data still sitting on the per-stream in-queues. */
106 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 asoc->cnt_on_all_streams * MSIZE));
112 /* what is the overhead of all these rwnd's */
113 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
115 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 * even it is 0. SWS engaged
/* Silly-Window-Syndrome avoidance: clamp tiny windows (branch body elided). */
118 if (calc < stcb->asoc.my_rwnd_control_len) {
127 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a sctp_queued_to_read entry for an arriving
 * DATA chunk.  Takes a reference on `net` (whoFrom).  Returns NULL when
 * allocation fails.  NOTE(review): the `dm` (mbuf data) parameter line
 * and the early-return body are elided from this excerpt.
 */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131 struct sctp_nets *net,
132 uint32_t tsn, uint32_t ppid,
133 uint32_t context, uint16_t stream_no,
134 uint32_t stream_seq, uint8_t flags,
137 struct sctp_queued_to_read *read_queue_e = NULL;
139 sctp_alloc_a_readq(stcb, read_queue_e);
140 if (read_queue_e == NULL) {
143 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 read_queue_e->sinfo_stream = stream_no;
145 read_queue_e->sinfo_ssn = stream_seq;
/* sinfo_flags keeps the chunk flags in its upper byte by convention. */
146 read_queue_e->sinfo_flags = (flags << 8);
147 read_queue_e->sinfo_ppid = ppid;
148 read_queue_e->sinfo_context = context;
149 read_queue_e->sinfo_tsn = tsn;
150 read_queue_e->sinfo_cumtsn = tsn;
151 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
/* 0xffffffff marks "no fragment seen yet" for the FSN tracking fields. */
152 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 TAILQ_INIT(&read_queue_e->reasm);
154 read_queue_e->whoFrom = net;
155 atomic_add_int(&net->ref_count, 1);
156 read_queue_e->data = dm;
157 read_queue_e->stcb = stcb;
158 read_queue_e->port_from = stcb->rport;
160 return (read_queue_e);
/*
 * Build an mbuf holding the ancillary (cmsg) control data to hand up
 * with a received message: SCTP_RCVINFO, SCTP_NXTINFO and/or the legacy
 * SCTP_SNDRCV / SCTP_EXTRCV structures, depending on which socket
 * features are enabled on the endpoint.
 * NOTE(review): this excerpt elides the return-type line, the `len`/`ret`
 * declarations, the NULL-return paths, the memset that zeroes padding,
 * several closing braces and the final return of the mbuf.
 */
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
166 struct sctp_extrcvinfo *seinfo;
167 struct sctp_sndrcvinfo *outinfo;
168 struct sctp_rcvinfo *rcvinfo;
169 struct sctp_nxtinfo *nxtinfo;
/* Fast path: no ancillary data requested at all. */
176 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 /* user does not want any ancillary data */
/* First pass: size the buffer (CMSG_SPACE per enabled cmsg). */
183 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
186 seinfo = (struct sctp_extrcvinfo *)sinfo;
/* NXTINFO is only emitted if info about a next message is available. */
187 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
190 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
194 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
197 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/* Allocate one mbuf large enough for all requested cmsgs. */
206 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 SCTP_BUF_LEN(ret) = 0;
213 /* We need a CMSG header followed by the struct */
214 cmh = mtod(ret, struct cmsghdr *);
216 * Make sure that there is no un-initialized padding between the
217 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: fill each cmsg and advance cmh past it. */
220 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 cmh->cmsg_level = IPPROTO_SCTP;
222 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 cmh->cmsg_type = SCTP_RCVINFO;
224 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 rcvinfo->rcv_context = sinfo->sinfo_context;
232 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* NXTINFO cmsg — guard condition elided in this view; presumably the
 * same RECVNXTINFO + SCTP_NEXT_MSG_AVAIL test as in the sizing pass. */
237 cmh->cmsg_level = IPPROTO_SCTP;
238 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 cmh->cmsg_type = SCTP_NXTINFO;
240 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 nxtinfo->nxt_flags = 0;
243 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 nxtinfo->nxt_flags |= SCTP_UNORDERED;
246 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 nxtinfo->nxt_flags |= SCTP_COMPLETE;
252 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy SCTP_SNDRCV / extended SCTP_EXTRCV cmsg. */
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
262 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 cmh->cmsg_type = SCTP_EXTRCV;
264 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 cmh->cmsg_type = SCTP_SNDRCV;
270 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a delivered TSN from the revokable mapping array to the
 * non-revokable (nr) mapping array, and maintain the two
 * highest-tsn-inside-map watermarks.  No-op when sctp_do_drain is
 * disabled or the TSN is already below the cumulative ack.
 * NOTE(review): return-type line, early returns and some closing braces
 * are elided from this excerpt.
 */
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
280 uint32_t gap, i, cumackp1;
282 int in_r = 0, in_nr = 0;
/* Draining (and therefore revoking) disabled: nothing to track. */
284 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 cumackp1 = asoc->cumulative_tsn + 1;
288 if (SCTP_TSN_GT(cumackp1, tsn)) {
290 * this tsn is behind the cum ack and thus we don't need to
291 * worry about it being moved from one to the other.
295 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* TSN present in neither map is an invariant violation — panic under
 * INVARIANTS (the #ifdef lines are elided here), otherwise log. */
298 if ((in_r == 0) && (in_nr == 0)) {
300 panic("Things are really messed up now");
302 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 sctp_print_mapping_array(asoc);
/* Set in the nr-map, clear from the revokable map. */
307 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 asoc->highest_tsn_inside_nr_map = tsn;
/* If we just cleared the highest revokable TSN, scan down for the new
 * highest; if none remain, park the watermark at base_tsn - 1. */
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
324 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert a read-queue control entry into the proper per-stream queue
 * (unordered vs. ordered), keeping the ordered queue sorted by msg_id.
 * Returns non-zero (presumably -1) when a duplicate msg_id is found so
 * the caller can abort — TODO confirm exact return values; the return
 * statements are elided from this excerpt along with the return-type
 * line and several else branches.
 */
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t bits, unordered;
/* Chunk flags live in the upper byte of sinfo_flags. */
338 bits = (control->sinfo_flags >> 8);
339 unordered = bits & SCTP_DATA_UNORDERED;
341 q = &strm->uno_inqueue;
/* Old (non-I-DATA) format: only one unordered entry per stream. */
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
345 * Only one stream can be here in old style
350 TAILQ_INSERT_TAIL(q, control, next_instrm);
351 control->on_strm_q = SCTP_ON_UNORDERED;
/* A complete (unfragmented) message has seen both first and last frag. */
357 if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
360 if (TAILQ_EMPTY(q)) {
362 TAILQ_INSERT_HEAD(q, control, next_instrm);
364 control->on_strm_q = SCTP_ON_UNORDERED;
366 control->on_strm_q = SCTP_ON_ORDERED;
/* Walk the queue and insert in msg_id order. */
370 TAILQ_FOREACH(at, q, next_instrm) {
371 if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
373 * one in queue is bigger than the new one,
374 * insert before this one
376 TAILQ_INSERT_BEFORE(at, control, next_instrm);
378 control->on_strm_q = SCTP_ON_UNORDERED;
380 control->on_strm_q = SCTP_ON_ORDERED;
383 } else if (at->msg_id == control->msg_id) {
385 * Gak, He sent me a duplicate msg id
386 * number?? return -1 to abort.
390 if (TAILQ_NEXT(at, next_instrm) == NULL) {
392 * We are at the end, insert it
395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 sctp_log_strm_del(control, at,
397 SCTP_STR_LOG_FROM_INSERT_TL);
399 TAILQ_INSERT_AFTER(q,
400 at, control, next_instrm);
402 control->on_strm_q = SCTP_ON_UNORDERED;
404 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association because of a reassembly protocol violation:
 * format a diagnostic string (I-DATA vs. old-DATA layout), free the
 * offending chunk, build a PROTOCOL_VIOLATION error cause and tear the
 * association down.  `opspot` identifies the call site (SCTP_LOC_*).
 * NOTE(review): the `oper` declaration, the *abort_flag assignment and
 * some lines using `opspot` are elided from this excerpt.
 */
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416 struct sctp_queued_to_read *control,
417 struct sctp_tmit_chunk *chk,
418 int *abort_flag, int opspot)
420 char msg[SCTP_DIAG_INFO_LEN];
/* Two formats: I-DATA uses 32-bit FSN/MID, old DATA uses 16-bit SSN. */
423 if (stcb->asoc.idata_supported) {
424 snprintf(msg, sizeof(msg),
425 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
427 control->fsn_included,
428 chk->rec.data.TSN_seq,
429 chk->rec.data.stream_number,
430 chk->rec.data.fsn_num, chk->rec.data.stream_seq);
432 snprintf(msg, sizeof(msg),
433 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
435 control->fsn_included,
436 chk->rec.data.TSN_seq,
437 chk->rec.data.stream_number,
438 chk->rec.data.fsn_num,
439 (uint16_t) chk->rec.data.stream_seq);
441 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
/* Release the chunk's data before the association goes away. */
442 sctp_m_freem(chk->data);
444 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
445 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
446 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Dispose of a control entry that could not be placed on a stream
 * queue: free every chunk still on its reassembly list, then free the
 * control itself.  (Storage-class/return-type line elided in this view.)
 */
451 clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
454 * The control could not be placed and must be cleaned.
456 struct sctp_tmit_chunk *chk, *nchk;
458 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
459 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
461 sctp_m_freem(chk->data);
463 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
465 sctp_free_a_readq(stcb, control);
469 * Queue the chunk either right into the socket buffer if it is the next one
470 * to go OR put it in the correct place in the delivery queue. If we do
471 * append to the so_buf, keep doing so until we are out of order as
472 * long as the control's entered are non-fragmented.
/*
 * Ordered-delivery entry point: either deliver `control` directly to the
 * socket receive buffer (when its SSN is the next expected) — draining
 * any now-in-order successors — or insert it into the stream's sorted
 * in-queue via sctp_place_control_in_stream().  Aborts the association
 * on an SSN behind the delivery point or a duplicate msg_id.
 * NOTE(review): heavily elided — declarations (op_err, nxt_todel, so),
 * several returns/braces and the Apple/lock-testing #ifdef structure are
 * missing from this excerpt.
 */
475 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
476 struct sctp_stream_in *strm,
477 struct sctp_association *asoc,
478 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
481 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
482 * all the data in one stream this could happen quite rapidly. One
483 * could use the TSN to keep track of things, but this scheme breaks
484 * down in the other type of stream usage that could occur. Send a
485 * single msg to stream 0, send 4Billion messages to stream 1, now
486 * send a message to stream 0. You have a situation where the TSN
487 * has wrapped but not in the stream. Is this worth worrying about
488 * or should we just change our queue sort at the bottom to be by
491 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
492 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
493 * assignment this could happen... and I don't see how this would be
494 * a violation. So for now I am undecided an will leave the sort by
495 * SSN alone. Maybe a hybred approach is the answer
498 struct sctp_queued_to_read *at;
502 char msg[SCTP_DIAG_INFO_LEN];
504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
505 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* Arriving SSN behind what we already delivered => protocol violation. */
507 if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
508 /* The incoming sseq is behind where we last delivered? */
509 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
510 control->sinfo_ssn, strm->last_sequence_delivered);
513 * throw it in the stream so it gets cleaned up in
514 * association destruction
516 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
517 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
518 strm->last_sequence_delivered, control->sinfo_tsn,
519 control->sinfo_stream, control->sinfo_ssn);
520 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
521 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
522 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
527 if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
/* Account for the queued data before deciding on direct delivery. */
531 asoc->size_on_all_streams += control->length;
532 sctp_ucount_incr(asoc->cnt_on_all_streams);
533 nxt_todel = strm->last_sequence_delivered + 1;
534 if (nxt_todel == control->sinfo_ssn) {
535 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple/lock-testing builds must take the socket lock, dropping the
 * TCB lock around it (refcount held to keep the tcb alive). */
538 so = SCTP_INP_SO(stcb->sctp_ep);
539 atomic_add_int(&stcb->asoc.refcnt, 1);
540 SCTP_TCB_UNLOCK(stcb);
541 SCTP_SOCKET_LOCK(so, 1);
543 atomic_subtract_int(&stcb->asoc.refcnt, 1);
544 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
545 SCTP_SOCKET_UNLOCK(so, 1);
549 /* can be delivered right away? */
550 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
551 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
553 /* EY it wont be queued if it could be delivered directly */
555 asoc->size_on_all_streams -= control->length;
556 sctp_ucount_decr(asoc->cnt_on_all_streams);
557 strm->last_sequence_delivered++;
558 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
559 sctp_add_to_readq(stcb->sctp_ep, stcb,
561 &stcb->sctp_socket->so_rcv, 1,
562 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* Drain any queued entries that are now in order and unfragmented. */
563 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
565 nxt_todel = strm->last_sequence_delivered + 1;
566 if ((nxt_todel == control->sinfo_ssn) &&
567 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
568 asoc->size_on_all_streams -= control->length;
569 sctp_ucount_decr(asoc->cnt_on_all_streams);
570 if (control->on_strm_q == SCTP_ON_ORDERED) {
571 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
574 panic("Huh control: %p is on_strm_q: %d",
575 control, control->on_strm_q);
578 control->on_strm_q = 0;
579 strm->last_sequence_delivered++;
581 * We ignore the return of deliver_data here
582 * since we always can hold the chunk on the
583 * d-queue. And we have a finite number that
584 * can be delivered from the strq.
586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
587 sctp_log_strm_del(control, NULL,
588 SCTP_STR_LOG_FROM_IMMED_DEL);
590 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
591 sctp_add_to_readq(stcb->sctp_ep, stcb,
593 &stcb->sctp_socket->so_rcv, 1,
594 SCTP_READ_LOCK_NOT_HELD,
597 } else if (nxt_todel == control->sinfo_ssn) {
602 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
603 SCTP_SOCKET_UNLOCK(so, 1);
608 * Ok, we did not deliver this guy, find the correct place
609 * to put it on the queue.
/* Out-of-order: sorted insert; non-zero return means duplicate msg_id. */
611 if (sctp_place_control_in_stream(strm, asoc, control)) {
612 snprintf(msg, sizeof(msg),
613 "Queue to str msg_id: %u duplicate",
615 clean_up_control(stcb, control);
616 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
617 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
618 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk control->data, drop zero-length mbufs, total the byte count into
 * control->length, charge the socket buffer when the control is already
 * on the read queue, and leave control->tail_mbuf pointing at the last
 * mbuf for O(1) appends.  NOTE(review): the loop header and several
 * braces are elided from this excerpt.
 */
626 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
628 struct mbuf *m, *prev = NULL;
629 struct sctp_tcb *stcb;
631 stcb = control->stcb;
632 control->held_length = 0;
636 if (SCTP_BUF_LEN(m) == 0) {
637 /* Skip mbufs with NO length */
/* Free the empty mbuf, relinking either the head or prev->next. */
640 control->data = sctp_m_free(m);
643 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
644 m = SCTP_BUF_NEXT(prev);
647 control->tail_mbuf = prev;
652 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
653 if (control->on_read_q) {
655 * On read queue so we must increment the SB stuff,
656 * we assume caller has done any locks of SB.
658 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
660 m = SCTP_BUF_NEXT(m);
663 control->tail_mbuf = prev;
/*
 * Append mbuf chain `m` to control->data using the cached tail pointer,
 * skipping/freeing zero-length mbufs, updating control->length and the
 * socket-buffer accounting (when on the read queue), and re-caching
 * tail_mbuf.  Falls back to sctp_setup_tail_pointer() when no tail is
 * cached yet.  NOTE(review): loop headers, a NULL-stcb guard and some
 * braces are elided from this excerpt.
 */
668 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
670 struct mbuf *prev = NULL;
671 struct sctp_tcb *stcb;
673 stcb = control->stcb;
/* INVARIANTS-style sanity check — surrounding #ifdef elided. */
676 panic("Control broken");
681 if (control->tail_mbuf == NULL) {
684 sctp_setup_tail_pointer(control);
687 control->tail_mbuf->m_next = m;
689 if (SCTP_BUF_LEN(m) == 0) {
690 /* Skip mbufs with NO length */
693 control->tail_mbuf->m_next = sctp_m_free(m);
694 m = control->tail_mbuf->m_next;
696 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
697 m = SCTP_BUF_NEXT(prev);
700 control->tail_mbuf = prev;
705 if (control->on_read_q) {
707 * On read queue so we must increment the SB stuff,
708 * we assume caller has done any locks of SB.
710 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
712 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
713 m = SCTP_BUF_NEXT(m);
716 control->tail_mbuf = prev;
/*
 * Clone the identifying fields of `control` into the freshly-allocated
 * entry `nc` (zeroed first): stream/ssn/msg_id, sinfo_* fields, source
 * net (taking a reference) and ports.  fsn_included is reset to the
 * "nothing included yet" sentinel 0xffffffff and the reasm list is
 * re-initialized empty.  Used when splitting leftover fragments onto a
 * new control (see sctp_handle_old_data).
 */
721 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
723 memset(nc, 0, sizeof(struct sctp_queued_to_read))
724 nc->sinfo_stream = control->sinfo_stream;
725 nc->sinfo_ssn = control->sinfo_ssn;
726 TAILQ_INIT(&nc->reasm);
727 nc->top_fsn = control->top_fsn;
728 nc->msg_id = control->msg_id;
729 nc->sinfo_flags = control->sinfo_flags;
730 nc->sinfo_ppid = control->sinfo_ppid;
731 nc->sinfo_context = control->sinfo_context;
732 nc->fsn_included = 0xffffffff;
733 nc->sinfo_tsn = control->sinfo_tsn;
734 nc->sinfo_cumtsn = control->sinfo_cumtsn;
735 nc->sinfo_assoc_id = control->sinfo_assoc_id;
736 nc->whoFrom = control->whoFrom;
737 atomic_add_int(&nc->whoFrom->ref_count, 1);
738 nc->stcb = control->stcb;
739 nc->port_from = control->port_from;
/*
 * Reassembly service for old-style (pre I-DATA) unordered data, where
 * all TSNs share msg_id 0 on one control: collapse consecutive FSNs
 * from the reasm list onto the control, and when the message completes,
 * spin any leftover fragments onto a new control entry, push the
 * finished message to the read queue and wake the reader.  Also starts
 * a partial-delivery API session when the accumulated length exceeds
 * pd_point.  Return value signals the caller whether to stop scanning
 * the unordered queue — TODO confirm; the return statements, the `fsn`
 * declaration and several braces are elided from this excerpt.
 */
743 sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
744 struct sctp_queued_to_read *control, uint32_t pd_point)
747 * Special handling for the old un-ordered data chunk. All the
748 * chunks/TSN's go to msg_id 0. So we have to do the old style
749 * watching to see if we have it all. If you return one, no other
750 * control entries on the un-ordered queue will be looked at. In
751 * theory there should be no others entries in reality, unless the
752 * guy is sending both unordered NDATA and unordered DATA...
754 struct sctp_tmit_chunk *chk, *lchk, *tchk;
756 struct sctp_queued_to_read *nc = NULL;
759 if (control->first_frag_seen == 0) {
760 /* Nothing we can do, we have not seen the first piece yet */
763 /* Collapse any we can */
766 fsn = control->fsn_included + 1;
767 /* Now what can we add? */
/* Merge every chunk whose FSN is exactly the next expected one. */
768 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
769 if (chk->rec.data.fsn_num == fsn) {
771 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
772 sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
776 if (control->end_added) {
778 if (!TAILQ_EMPTY(&control->reasm)) {
780 * Ok we have to move anything left
781 * on the control queue to a new
/* Leftovers belong to the NEXT unordered message: give them a fresh
 * control entry cloned from this one. */
784 sctp_alloc_a_readq(stcb, nc);
785 sctp_build_readq_entry_from_ctl(nc, control);
786 tchk = TAILQ_FIRST(&control->reasm);
787 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
788 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
789 nc->first_frag_seen = 1;
790 nc->fsn_included = tchk->rec.data.fsn_num;
791 nc->data = tchk->data;
792 sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
794 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
795 sctp_setup_tail_pointer(nc);
796 tchk = TAILQ_FIRST(&control->reasm);
798 /* Spin the rest onto the queue */
800 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
801 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
802 tchk = TAILQ_FIRST(&control->reasm);
805 * Now lets add it to the queue
806 * after removing control
808 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
809 nc->on_strm_q = SCTP_ON_UNORDERED;
810 if (control->on_strm_q) {
811 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
812 control->on_strm_q = 0;
/* Completed message ends any in-flight partial-delivery session. */
815 if (control->pdapi_started) {
816 strm->pd_api_started = 0;
817 control->pdapi_started = 0;
819 if (control->on_strm_q) {
820 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
821 control->on_strm_q = 0;
823 if (control->on_read_q == 0) {
824 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
825 &stcb->sctp_socket->so_rcv, control->end_added,
826 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
828 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
829 if ((nc) && (nc->first_frag_seen)) {
831 * Switch to the new guy and
/* Message not complete: start PD-API if enough data has accumulated. */
845 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
846 strm->pd_api_started = 1;
847 control->pdapi_started = 1;
848 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
849 &stcb->sctp_socket->so_rcv, control->end_added,
850 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
851 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Place an old-style unordered DATA chunk into a control's reassembly
 * state.  FIRST_FRAG chunks may replace the currently-held first
 * fragment (swapping data/length/FSN when the new first is smaller),
 * start the message (no first seen yet), or trigger an abort when they
 * collide with an in-progress PD-API delivery.  Non-first chunks are
 * inserted into the reasm list sorted ascending by FSN; duplicates
 * abort the association.  NOTE(review): return statements, some local
 * declarations (tdata/tmp) and several braces are elided from this
 * excerpt.
 */
859 sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
860 struct sctp_queued_to_read *control,
861 struct sctp_tmit_chunk *chk,
864 struct sctp_tmit_chunk *at;
868 * Here we need to place the chunk into the control structure sorted
869 * in the correct order.
871 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
872 /* Its the very first one. */
873 SCTPDBG(SCTP_DEBUG_XXX,
874 "chunk is a first fsn: %u becomes fsn_included\n",
875 chk->rec.data.fsn_num);
876 if (control->first_frag_seen) {
878 * In old un-ordered we can reassembly on one
879 * control multiple messages. As long as the next
880 * FIRST is greater then the old first (TSN i.e. FSN
/* New FIRST above what we already included: a later message starts. */
886 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
888 * Easy way the start of a new guy beyond
893 if ((chk->rec.data.fsn_num == control->fsn_included) ||
894 (control->pdapi_started)) {
896 * Ok this should not happen, if it does we
897 * started the pd-api on the higher TSN
898 * (since the equals part is a TSN failure
901 * We are completly hosed in that case since I
902 * have no way to recover. This really will
903 * only happen if we can get more TSN's
904 * higher before the pd-api-point.
906 sctp_abort_in_reasm(stcb, control, chk,
908 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
913 * Ok we have two firsts and the one we just got is
914 * smaller than the one we previously placed.. yuck!
915 * We must swap them out.
/* Swap the chunk's payload with the control's current first fragment. */
918 tdata = control->data;
919 control->data = chk->data;
921 /* Swap the lengths */
922 tmp = control->length;
923 control->length = chk->send_size;
924 chk->send_size = tmp;
925 /* Fix the FSN included */
926 tmp = control->fsn_included;
927 control->fsn_included = chk->rec.data.fsn_num;
928 chk->rec.data.fsn_num = tmp;
/* No first fragment yet: this chunk becomes the message start. */
931 control->first_frag_seen = 1;
932 control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
933 control->data = chk->data;
934 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
936 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
937 sctp_setup_tail_pointer(control);
/* Not a first fragment: keep the reasm list sorted by FSN. */
941 if (TAILQ_EMPTY(&control->reasm)) {
942 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
943 asoc->size_on_reasm_queue += chk->send_size;
944 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
947 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
948 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
950 * This one in queue is bigger than the new one,
951 * insert the new one before at.
953 asoc->size_on_reasm_queue += chk->send_size;
954 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
956 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
958 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
960 * They sent a duplicate fsn number. This really
961 * should not happen since the FSN is a TSN and it
962 * should have been dropped earlier.
965 sctp_m_freem(chk->data);
968 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
969 sctp_abort_in_reasm(stcb, control, chk,
971 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
/* Fell off the end of the list: new highest FSN, append at tail. */
977 asoc->size_on_reasm_queue += chk->send_size;
978 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
979 control->top_fsn = chk->rec.data.fsn_num;
980 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * Scan a stream's unordered and ordered queues for reassembled messages
 * that are ready to move to the socket read queue.  Handles the legacy
 * (non I-DATA) unordered path via sctp_handle_old_data(), completes or
 * starts partial-delivery (PD-API) sessions bounded by pd_point, and
 * advances last_sequence_delivered as in-order messages drain.
 * NOTE(review): heavily elided — the `done` declaration, loop headers
 * over the queues, many braces/returns and the INVARIANTS #ifdefs are
 * missing from this excerpt.
 */
985 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
988 * Given a stream, strm, see if any of the SSN's on it that are
989 * fragmented are ready to deliver. If so go ahead and place them on
990 * the read queue. In so placing if we have hit the end, then we
991 * need to remove them from the stream's queue.
993 struct sctp_queued_to_read *control, *nctl = NULL;
994 uint32_t next_to_del;
/* PD-API threshold: fraction of the receive buffer, capped by the
 * endpoint's configured partial_delivery_point. */
998 if (stcb->sctp_socket) {
999 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1000 stcb->sctp_ep->partial_delivery_point);
1002 pd_point = stcb->sctp_ep->partial_delivery_point;
/* --- Unordered queue --- */
1004 control = TAILQ_FIRST(&strm->uno_inqueue);
1006 (asoc->idata_supported == 0)) {
1007 /* Special handling needed for "old" data format */
1008 if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
1012 if (strm->pd_api_started) {
1013 /* Can't add more */
1017 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1018 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1019 nctl = TAILQ_NEXT(control, next_instrm);
1020 if (control->end_added) {
1021 /* We just put the last bit on */
1022 if (control->on_strm_q) {
1024 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1025 panic("Huh control: %p on_q: %d -- not unordered?",
1026 control, control->on_strm_q);
1029 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1030 control->on_strm_q = 0;
1032 if (control->on_read_q == 0) {
1033 sctp_add_to_readq(stcb->sctp_ep, stcb,
1035 &stcb->sctp_socket->so_rcv, control->end_added,
1036 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1039 /* Can we do a PD-API for this un-ordered guy? */
1040 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1041 strm->pd_api_started = 1;
1042 control->pdapi_started = 1;
1043 sctp_add_to_readq(stcb->sctp_ep, stcb,
1045 &stcb->sctp_socket->so_rcv, control->end_added,
1046 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* --- Ordered queue --- */
1054 control = TAILQ_FIRST(&strm->inqueue);
1055 if (strm->pd_api_started) {
1056 /* Can't add more */
1059 if (control == NULL) {
1062 if (strm->last_sequence_delivered == control->sinfo_ssn) {
1064 * Ok the guy at the top was being partially delivered
1065 * completed, so we remove it. Note the pd_api flag was
1066 * taken off when the chunk was merged on in
1067 * sctp_queue_data_for_reasm below.
1069 nctl = TAILQ_NEXT(control, next_instrm);
1070 SCTPDBG(SCTP_DEBUG_XXX,
1071 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1072 control, control->end_added, control->sinfo_ssn,
1073 control->top_fsn, control->fsn_included,
1074 strm->last_sequence_delivered);
1075 if (control->end_added) {
1076 if (control->on_strm_q) {
1078 if (control->on_strm_q != SCTP_ON_ORDERED) {
1079 panic("Huh control: %p on_q: %d -- not ordered?",
1080 control, control->on_strm_q);
1083 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1084 control->on_strm_q = 0;
1086 if (strm->pd_api_started && control->pdapi_started) {
1087 control->pdapi_started = 0;
1088 strm->pd_api_started = 0;
1090 if (control->on_read_q == 0) {
1091 sctp_add_to_readq(stcb->sctp_ep, stcb,
1093 &stcb->sctp_socket->so_rcv, control->end_added,
1094 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1099 if (strm->pd_api_started) {
1101 * Can't add more must have gotten an un-ordered above being
1102 * partially delivered.
/* Deliver the next expected SSN if its first fragment has arrived. */
1107 next_to_del = strm->last_sequence_delivered + 1;
1109 SCTPDBG(SCTP_DEBUG_XXX,
1110 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1111 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1113 nctl = TAILQ_NEXT(control, next_instrm);
1114 if ((control->sinfo_ssn == next_to_del) &&
1115 (control->first_frag_seen)) {
1118 /* Ok we can deliver it onto the stream. */
1119 if (control->end_added) {
1120 /* We are done with it afterwards */
1121 if (control->on_strm_q) {
1123 if (control->on_strm_q != SCTP_ON_ORDERED) {
1124 panic("Huh control: %p on_q: %d -- not ordered?",
1125 control, control->on_strm_q);
1128 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1129 control->on_strm_q = 0;
1133 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1135 * A singleton now slipping through - mark
1136 * it non-revokable too
1138 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1139 } else if (control->end_added == 0) {
1141 * Check if we can defer adding until its
1144 if ((control->length < pd_point) || (strm->pd_api_started)) {
1146 * Don't need it or cannot add more
1147 * (one being delivered that way)
1152 done = (control->end_added) && (control->last_frag_seen);
1153 if (control->on_read_q == 0) {
1154 sctp_add_to_readq(stcb->sctp_ep, stcb,
1156 &stcb->sctp_socket->so_rcv, control->end_added,
1157 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1159 strm->last_sequence_delivered = next_to_del;
1164 /* We are now doing PD API */
1165 strm->pd_api_started = 1;
1166 control->pdapi_started = 1;
/*
 * Merge the data mbuf chain of chunk 'chk' onto the reassembly control
 * 'control', update the reassembly accounting, and free the chunk.
 * NOTE(review): this excerpt has lines dropped (unbalanced braces and
 * missing #ifdef/#endif pairs); comments below describe only the code
 * that is visible here.
 */
1175 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1176 struct sctp_stream_in *strm,
1177 struct sctp_tcb *stcb, struct sctp_association *asoc,
1178 struct sctp_tmit_chunk *chk)
1181 * Given a control and a chunk, merge the data from the chk onto the
1182 * control and free up the chunk resources.
1186 if (control->on_read_q) {
1188 * Its being pd-api'd so we must do some locks.
1190 SCTP_INP_READ_LOCK(stcb->sctp_ep);
1193 if (control->data == NULL) {
/* First data for this message: the chunk's mbuf chain becomes the head. */
1194 control->data = chk->data;
1195 sctp_setup_tail_pointer(control);
/* Otherwise append the chunk's data at the cached tail pointer. */
1197 sctp_add_to_tail_pointer(control, chk->data);
/* Record the highest fragment sequence number merged so far. */
1199 control->fsn_included = chk->rec.data.fsn_num;
/* The data left the reassembly queue; adjust size/count accounting. */
1200 asoc->size_on_reasm_queue -= chk->send_size;
1201 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1202 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1204 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1205 control->first_frag_seen = 1;
1207 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1209 if ((control->on_strm_q) && (control->on_read_q)) {
1210 if (control->pdapi_started) {
/* Last fragment arrived while partial delivery was active: end the PD-API. */
1211 control->pdapi_started = 0;
1212 strm->pd_api_started = 0;
/* Message complete; unhook it from whichever stream queue holds it. */
1214 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1216 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1217 control->on_strm_q = 0;
1218 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1220 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1221 control->on_strm_q = 0;
1223 } else if (control->on_strm_q) {
1224 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1225 control->on_strm_q);
1229 control->end_added = 1;
1230 control->last_frag_seen = 1;
1233 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* Data has been taken over by 'control'; release the chunk wrapper. */
1235 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1239 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1240 * queue, see if anything can be delivered. If so pull it off (or as much as
1241 * we can). If we run out of space then we must dump what we can and set the
1242 * appropriate flag to say we queued what we could.
/*
 * Insert chunk 'chk' into the reassembly state for 'control' (in FSN
 * order), then pull any now-in-sequence fragments onto the control and
 * deliver to the read queue when possible.  Sets *abort_flag on
 * protocol violations.  NOTE(review): lines are missing from this
 * excerpt (unbalanced braces); comments describe only the visible code.
 */
1245 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1246 struct sctp_stream_in *strm,
1247 struct sctp_queued_to_read *control,
1248 struct sctp_tmit_chunk *chk,
1249 int created_control,
1250 int *abort_flag, uint32_t tsn)
1253 struct sctp_tmit_chunk *at, *nat;
1254 int do_wakeup, unordered;
1257 * For old un-ordered data chunks.
1259 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1264 /* Must be added to the stream-in queue */
1265 if (created_control) {
1266 if (sctp_place_control_in_stream(strm, asoc, control)) {
1267 /* Duplicate SSN? */
1268 clean_up_control(stcb, control);
1269 sctp_abort_in_reasm(stcb, control, chk,
1271 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1274 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1276 * Ok we created this control and now lets validate
1277 * that its legal i.e. there is a B bit set, if not
1278 * and we have up to the cum-ack then its invalid.
1280 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1281 sctp_abort_in_reasm(stcb, control, chk,
1283 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old (non I-DATA) unordered fragments take a separate injection path. */
1288 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1289 sctp_inject_old_data_unordered(stcb, asoc, control, chk, abort_flag);
1293 * Ok we must queue the chunk into the reassembly portion: o if its
1294 * the first it goes to the control mbuf. o if its not first but the
1295 * next in sequence it goes to the control, and each succeeding one
1296 * in order also goes. o if its not in order we place it on the list
1299 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1300 /* Its the very first one. */
1301 SCTPDBG(SCTP_DEBUG_XXX,
1302 "chunk is a first fsn: %u becomes fsn_included\n",
1303 chk->rec.data.fsn_num);
1304 if (control->first_frag_seen) {
1306 * Error on senders part, they either sent us two
1307 * data chunks with FIRST, or they sent two
1308 * un-ordered chunks that were fragmented at the
1309 * same time in the same stream.
1311 sctp_abort_in_reasm(stcb, control, chk,
1313 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* First fragment: its data seeds the control directly. */
1316 control->first_frag_seen = 1;
1317 control->fsn_included = chk->rec.data.fsn_num;
1318 control->data = chk->data;
1319 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1321 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1322 sctp_setup_tail_pointer(control);
1324 /* Place the chunk in our list */
1327 if (control->last_frag_seen == 0) {
1328 /* Still willing to raise highest FSN seen */
1329 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1330 SCTPDBG(SCTP_DEBUG_XXX,
1331 "We have a new top_fsn: %u\n",
1332 chk->rec.data.fsn_num);
1333 control->top_fsn = chk->rec.data.fsn_num;
1335 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1336 SCTPDBG(SCTP_DEBUG_XXX,
1337 "The last fsn is now in place fsn: %u\n",
1338 chk->rec.data.fsn_num);
1339 control->last_frag_seen = 1;
1341 if (asoc->idata_supported || control->first_frag_seen) {
1343 * For IDATA we always check since we know
1344 * that the first fragment is 0. For old
1345 * DATA we have to receive the first before
1346 * we know the first FSN (which is the TSN).
1348 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1350 * We have already delivered up to
1353 sctp_abort_in_reasm(stcb, control, chk,
1355 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/* Last fragment already seen: new fragments must not duplicate or exceed it. */
1360 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1361 /* Second last? huh? */
1362 SCTPDBG(SCTP_DEBUG_XXX,
1363 "Duplicate last fsn: %u (top: %u) -- abort\n",
1364 chk->rec.data.fsn_num, control->top_fsn);
1365 sctp_abort_in_reasm(stcb, control,
1367 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1370 if (asoc->idata_supported || control->first_frag_seen) {
1372 * For IDATA we always check since we know
1373 * that the first fragment is 0. For old
1374 * DATA we have to receive the first before
1375 * we know the first FSN (which is the TSN).
1378 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1380 * We have already delivered up to
1383 SCTPDBG(SCTP_DEBUG_XXX,
1384 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1385 chk->rec.data.fsn_num, control->fsn_included);
1386 sctp_abort_in_reasm(stcb, control, chk,
1388 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1393 * validate not beyond top FSN if we have seen last
1396 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1397 SCTPDBG(SCTP_DEBUG_XXX,
1398 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1399 chk->rec.data.fsn_num,
1401 sctp_abort_in_reasm(stcb, control, chk,
1403 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1408 * If we reach here, we need to place the new chunk in the
1409 * reassembly for this control.
1411 SCTPDBG(SCTP_DEBUG_XXX,
1412 "chunk is a not first fsn: %u needs to be inserted\n",
1413 chk->rec.data.fsn_num);
/* Walk the FSN-sorted reasm list and insert before the first larger entry. */
1414 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1415 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1417 * This one in queue is bigger than the new
1418 * one, insert the new one before at.
1420 SCTPDBG(SCTP_DEBUG_XXX,
1421 "Insert it before fsn: %u\n",
1422 at->rec.data.fsn_num);
1423 asoc->size_on_reasm_queue += chk->send_size;
1424 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1425 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1428 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1430 * Gak, He sent me a duplicate str seq
1434 * foo bar, I guess I will just free this
1435 * new guy, should we abort too? FIX ME
1436 * MAYBE? Or it COULD be that the SSN's have
1437 * wrapped. Maybe I should compare to TSN
1438 * somehow... sigh for now just blow away
1441 SCTPDBG(SCTP_DEBUG_XXX,
1442 "Duplicate to fsn: %u -- abort\n",
1443 at->rec.data.fsn_num);
1444 sctp_abort_in_reasm(stcb, control,
1446 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1450 if (inserted == 0) {
1451 /* Goes on the end */
1452 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1453 chk->rec.data.fsn_num);
1454 asoc->size_on_reasm_queue += chk->send_size;
1455 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1456 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1460 * Ok lets see if we can suck any up into the control structure that
1461 * are in seq if it makes sense.
1465 * If the first fragment has not been seen there is no sense in
1468 if (control->first_frag_seen) {
1469 next_fsn = control->fsn_included + 1;
/* Drain consecutive FSNs off the reasm list onto the control. */
1470 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1471 if (at->rec.data.fsn_num == next_fsn) {
1472 /* We can add this one now to the control */
1473 SCTPDBG(SCTP_DEBUG_XXX,
1474 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1476 at->rec.data.fsn_num,
1477 next_fsn, control->fsn_included);
1478 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1479 sctp_add_chk_to_control(control, strm, stcb, asoc, at);
1480 if (control->on_read_q) {
1484 if (control->end_added && control->pdapi_started) {
/* Message completed while a PD-API delivery was in flight: close it out. */
1485 if (strm->pd_api_started) {
1486 strm->pd_api_started = 0;
1487 control->pdapi_started = 0;
1489 if (control->on_read_q == 0) {
1490 sctp_add_to_readq(stcb->sctp_ep, stcb,
1492 &stcb->sctp_socket->so_rcv, control->end_added,
1493 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1504 /* Need to wakeup the reader */
1505 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Locate the in-progress reassembly entry for message 'msg_id' on the
 * stream: searched on the ordered inqueue and the unordered uno_inqueue.
 * NOTE(review): the exact role of 'ordered'/'old' in choosing the search
 * path is not fully visible in this excerpt (lines dropped) — confirm
 * against the full source.
 */
1509 static struct sctp_queued_to_read *
1510 find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1512 struct sctp_queued_to_read *control;
1515 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1516 if (control->msg_id == msg_id) {
1522 control = TAILQ_FIRST(&strm->uno_inqueue);
1525 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1526 if (control->msg_id == msg_id) {
/*
 * Process one inbound DATA or I-DATA chunk: parse the header, reject
 * duplicates and protocol violations, mark the TSN in the mapping
 * arrays, and either express-deliver the message to the read queue or
 * hand it to the stream/reassembly queues.  NOTE(review): this excerpt
 * has many lines dropped (unbalanced braces, missing #endif lines);
 * comments describe only the code that is visible here.
 */
1535 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1536 struct mbuf **m, int offset, int chk_length,
1537 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1538 int *break_flag, int last_chunk, uint8_t chtype)
1540 /* Process a data chunk */
1541 /* struct sctp_tmit_chunk *chk; */
1542 struct sctp_data_chunk *ch;
1543 struct sctp_idata_chunk *nch, chunk_buf;
1544 struct sctp_tmit_chunk *chk;
1545 uint32_t tsn, fsn, gap, msg_id;
1548 int need_reasm_check = 0;
1550 struct mbuf *op_err;
1551 char msg[SCTP_DIAG_INFO_LEN];
1552 struct sctp_queued_to_read *control = NULL;
1553 uint32_t protocol_id;
1554 uint8_t chunk_flags;
1555 struct sctp_stream_reset_list *liste;
1556 struct sctp_stream_in *strm;
1559 int created_control = 0;
/* Parse the chunk header; layout differs between I-DATA and old DATA. */
1563 if (chtype == SCTP_IDATA) {
1564 nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1565 sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1566 ch = (struct sctp_data_chunk *)nch;
1567 clen = sizeof(struct sctp_idata_chunk);
1568 tsn = ntohl(ch->dp.tsn);
1569 msg_id = ntohl(nch->dp.msg_id);
1570 if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1573 fsn = ntohl(nch->dp.ppid_fsn.fsn);
1576 ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1577 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1578 tsn = ntohl(ch->dp.tsn);
1579 clen = sizeof(struct sctp_data_chunk);
/* Old DATA: the 16-bit SSN doubles as the message id. */
1581 msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1585 chunk_flags = ch->ch.chunk_flags;
1586 if ((size_t)chk_length == clen) {
1588 * Need to send an abort since we had an empty data chunk.
1590 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1591 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1592 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1596 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1597 asoc->send_sack = 1;
1599 protocol_id = ch->dp.protocol_id;
1600 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1601 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1602 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1607 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* TSN at or below the cumulative ack is a duplicate: record and SACK. */
1608 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1609 /* It is a duplicate */
1610 SCTP_STAT_INCR(sctps_recvdupdata);
1611 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1612 /* Record a dup for the next outbound sack */
1613 asoc->dup_tsns[asoc->numduptsns] = tsn;
1616 asoc->send_sack = 1;
1619 /* Calculate the number of TSN's between the base and this TSN */
1620 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1621 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1622 /* Can't hold the bit in the mapping at max array, toss it */
1625 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1626 SCTP_TCB_LOCK_ASSERT(stcb);
1627 if (sctp_expand_mapping_array(asoc, gap)) {
1628 /* Can't expand, drop it */
1632 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1635 /* See if we have received this one already */
1636 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1637 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1638 SCTP_STAT_INCR(sctps_recvdupdata);
1639 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1640 /* Record a dup for the next outbound sack */
1641 asoc->dup_tsns[asoc->numduptsns] = tsn;
1644 asoc->send_sack = 1;
1648 * Check to see about the GONE flag, duplicates would cause a sack
1649 * to be sent up above
1651 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1652 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1653 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1655 * wait a minute, this guy is gone, there is no longer a
1656 * receiver. Send peer an ABORT!
1658 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1659 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1664 * Now before going further we see if there is room. If NOT then we
1665 * MAY let one through only IF this TSN is the one we are waiting
1666 * for on a partial delivery API.
1669 /* Is the stream valid? */
1670 strmno = ntohs(ch->dp.stream_id);
/* Invalid stream id: queue an INVALID_STREAM error cause, but still
 * mark the TSN as received (non-renegable) so the peer stops resending. */
1672 if (strmno >= asoc->streamincnt) {
1673 struct sctp_error_invalid_stream *cause;
1675 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1676 0, M_NOWAIT, 1, MT_DATA);
1677 if (op_err != NULL) {
1678 /* add some space up front so prepend will work well */
1679 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1680 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1682 * Error causes are just param's and this one has
1683 * two back to back phdr, one with the error type
1684 * and size, the other with the streamid and a rsvd
1686 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1687 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1688 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1689 cause->stream_id = ch->dp.stream_id;
1690 cause->reserved = htons(0);
1691 sctp_queue_op_err(stcb, op_err);
1693 SCTP_STAT_INCR(sctps_badsid);
1694 SCTP_TCB_LOCK_ASSERT(stcb);
1695 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1696 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1697 asoc->highest_tsn_inside_nr_map = tsn;
1699 if (tsn == (asoc->cumulative_tsn + 1)) {
1700 /* Update cum-ack */
1701 asoc->cumulative_tsn = tsn;
1705 strm = &asoc->strmin[strmno];
1707 * If its a fragmented message, lets see if we can find the control
1708 * on the reassembly queues.
1710 if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1712 * The first *must* be fsn 0, and other (middle/end) pieces
1713 * can *not* be fsn 0.
1717 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1718 /* See if we can find the re-assembly entity */
1719 control = find_reasm_entry(strm, msg_id, ordered, old_data);
1720 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1721 chunk_flags, control);
1723 /* We found something, does it belong? */
1724 if (ordered && (msg_id != control->sinfo_ssn)) {
1726 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1727 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1728 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* A message may not flip between ordered and unordered mid-reassembly. */
1732 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1734 * We can't have a switched order with an
1739 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1741 * We can't have a switched unordered with a
1749 * Its a complete segment. Lets validate we don't have a
1750 * re-assembly going on with the same Stream/Seq (for
1751 * ordered) or in the same Stream for unordered.
1753 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1755 if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
1756 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1763 /* now do the tests */
/* Resource limits: too many queued chunks or a closed receive window. */
1764 if (((asoc->cnt_on_all_streams +
1765 asoc->cnt_on_reasm_queue +
1766 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1767 (((int)asoc->my_rwnd) <= 0)) {
1769 * When we have NO room in the rwnd we check to make sure
1770 * the reader is doing its job...
1772 if (stcb->sctp_socket->so_rcv.sb_cc) {
1773 /* some to read, wake-up */
1774 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1777 so = SCTP_INP_SO(stcb->sctp_ep);
1778 atomic_add_int(&stcb->asoc.refcnt, 1);
1779 SCTP_TCB_UNLOCK(stcb);
1780 SCTP_SOCKET_LOCK(so, 1);
1781 SCTP_TCB_LOCK(stcb);
1782 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1783 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1784 /* assoc was freed while we were unlocked */
1785 SCTP_SOCKET_UNLOCK(so, 1);
1789 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1790 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1791 SCTP_SOCKET_UNLOCK(so, 1);
1794 /* now is it in the mapping array of what we have accepted? */
1796 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1797 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1798 /* Nope not in the valid range dump it */
1800 sctp_set_rwnd(stcb, asoc);
1801 if ((asoc->cnt_on_all_streams +
1802 asoc->cnt_on_reasm_queue +
1803 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1804 SCTP_STAT_INCR(sctps_datadropchklmt);
1806 SCTP_STAT_INCR(sctps_datadroprwnd);
1812 if (control == NULL) {
1815 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1820 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Optional debug-build ring buffer of inbound TSNs. */
1821 SCTP_TCB_LOCK_ASSERT(stcb);
1822 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1823 asoc->tsn_in_at = 0;
1824 asoc->tsn_in_wrapped = 1;
1826 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1827 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1828 asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1829 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1830 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1831 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1832 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1833 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1837 * Before we continue lets validate that we are not being fooled by
1838 * an evil attacker. We can only have Nk chunks based on our TSN
1839 * spread allowed by the mapping array N * 8 bits, so there is no
1840 * way our stream sequence numbers could have wrapped. We of course
1841 * only validate the FIRST fragment so the bit must be set.
1843 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1844 (TAILQ_EMPTY(&asoc->resetHead)) &&
1845 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1846 SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1847 /* The incoming sseq is behind where we last delivered? */
1848 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1849 msg_id, asoc->strmin[strmno].last_sequence_delivered);
1851 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1852 asoc->strmin[strmno].last_sequence_delivered,
1853 tsn, strmno, msg_id);
1854 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1855 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1856 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1860 /************************************
1861 * From here down we may find ch-> invalid
1862 * so its a good idea NOT to use it.
1863 *************************************/
1865 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1867 the_len = (chk_length - sizeof(struct sctp_data_chunk));
/* Either copy the payload out of the packet mbuf, or (for the last
 * chunk in the packet) steal the chain and trim it in place. */
1869 if (last_chunk == 0) {
1871 dmbuf = SCTP_M_COPYM(*m,
1872 (offset + sizeof(struct sctp_idata_chunk)),
1875 dmbuf = SCTP_M_COPYM(*m,
1876 (offset + sizeof(struct sctp_data_chunk)),
1879 #ifdef SCTP_MBUF_LOGGING
1880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1881 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1885 /* We can steal the last chunk */
1889 /* lop off the top part */
1891 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1893 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1895 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1896 l_len = SCTP_BUF_LEN(dmbuf);
1899 * need to count up the size hopefully does not hit
1905 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1906 l_len += SCTP_BUF_LEN(lat);
1909 if (l_len > the_len) {
1910 /* Trim the end round bytes off too */
1911 m_adj(dmbuf, -(l_len - the_len));
1914 if (dmbuf == NULL) {
1915 SCTP_STAT_INCR(sctps_nomem);
1919 * Now no matter what we need a control, get one if we don't have
1920 * one (we may have gotten it above when we found the message was
1923 if (control == NULL) {
1924 sctp_alloc_a_readq(stcb, control);
1925 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1930 if (control == NULL) {
1931 SCTP_STAT_INCR(sctps_nomem);
1934 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unfragmented message: the control is immediately complete. */
1935 control->data = dmbuf;
1936 control->tail_mbuf = NULL;
1937 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1938 control->top_fsn = control->fsn_included = fsn;
1940 created_control = 1;
1942 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1943 chunk_flags, ordered, msg_id, control);
1944 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1945 TAILQ_EMPTY(&asoc->resetHead) &&
1947 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1948 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1949 /* Candidate for express delivery */
1951 * Its not fragmented, No PD-API is up, Nothing in the
1952 * delivery queue, Its un-ordered OR ordered and the next to
1953 * deliver AND nothing else is stuck on the stream queue,
1954 * And there is room for it in the socket buffer. Lets just
1955 * stuff it up the buffer....
1957 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1958 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1959 asoc->highest_tsn_inside_nr_map = tsn;
1961 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
1964 sctp_add_to_readq(stcb->sctp_ep, stcb,
1965 control, &stcb->sctp_socket->so_rcv,
1966 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1968 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1969 /* for ordered, bump what we delivered */
1970 strm->last_sequence_delivered++;
1972 SCTP_STAT_INCR(sctps_recvexpress);
1973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1974 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
1975 SCTP_STR_LOG_FROM_EXPRS_DEL);
1978 goto finish_express_del;
1980 /* Now will we need a chunk too? */
1981 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1982 sctp_alloc_a_chunk(stcb, chk);
1984 /* No memory so we drop the chunk */
1985 SCTP_STAT_INCR(sctps_nomem);
1986 if (last_chunk == 0) {
1987 /* we copied it, free the copy */
1988 sctp_m_freem(dmbuf);
/* Populate the transmit-chunk wrapper used on the reassembly queue. */
1992 chk->rec.data.TSN_seq = tsn;
1993 chk->no_fr_allowed = 0;
1994 chk->rec.data.fsn_num = fsn;
1995 chk->rec.data.stream_seq = msg_id;
1996 chk->rec.data.stream_number = strmno;
1997 chk->rec.data.payloadtype = protocol_id;
1998 chk->rec.data.context = stcb->asoc.context;
1999 chk->rec.data.doing_fast_retransmit = 0;
2000 chk->rec.data.rcv_flags = chunk_flags;
2002 chk->send_size = the_len;
2004 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n",
2007 atomic_add_int(&net->ref_count, 1);
2010 /* Set the appropriate TSN mark */
2011 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2012 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2013 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2014 asoc->highest_tsn_inside_nr_map = tsn;
2017 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2018 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2019 asoc->highest_tsn_inside_map = tsn;
2022 /* Now is it complete (i.e. not fragmented)? */
2023 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2025 * Special check for when streams are resetting. We could be
2026 * more smart about this and check the actual stream to see
2027 * if it is not being reset.. that way we would not create a
2028 * HOLB when amongst streams being reset and those not being
2032 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2033 SCTP_TSN_GT(tsn, liste->tsn)) {
2035 * yep its past where we need to reset... go ahead
2038 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2040 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2042 struct sctp_queued_to_read *ctlOn, *nctlOn;
2043 unsigned char inserted = 0;
/* Keep pending_reply_queue sorted by TSN while inserting. */
2045 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2046 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2051 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2056 if (inserted == 0) {
2058 * must be put at end, use prevP
2059 * (all setup from loop) to setup
2062 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2065 goto finish_express_del;
2067 if (chunk_flags & SCTP_DATA_UNORDERED) {
2068 /* queue directly into socket buffer */
2069 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2071 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2072 sctp_add_to_readq(stcb->sctp_ep, stcb,
2074 &stcb->sctp_socket->so_rcv, 1,
2075 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2078 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2080 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2088 goto finish_express_del;
2090 /* If we reach here its a reassembly */
2091 need_reasm_check = 1;
2092 SCTPDBG(SCTP_DEBUG_XXX,
2093 "Queue data to stream for reasm control: %p msg_id: %u\n",
2095 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2098 * the assoc is now gone and chk was put onto the reasm
2099 * queue, which has all been freed.
2107 /* Here we tidy up things */
2108 if (tsn == (asoc->cumulative_tsn + 1)) {
2109 /* Update cum-ack */
2110 asoc->cumulative_tsn = tsn;
2116 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2118 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2120 SCTP_STAT_INCR(sctps_recvdata);
2121 /* Set it present please */
2122 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2123 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2125 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2126 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2127 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2129 /* check the special flag for stream resets */
2130 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2131 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2133 * we have finished working through the backlogged TSN's now
2134 * time to reset streams. 1: call reset function. 2: free
2135 * pending_reply space 3: distribute any chunks in
2136 * pending_reply_queue.
2138 struct sctp_queued_to_read *ctl, *nctl;
2140 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2141 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2142 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2143 SCTP_FREE(liste, SCTP_M_STRESET);
2144 /* sa_ignore FREED_MEMORY */
2145 liste = TAILQ_FIRST(&asoc->resetHead);
2146 if (TAILQ_EMPTY(&asoc->resetHead)) {
2147 /* All can be removed */
2148 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2149 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2150 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2156 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2157 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2161 * if ctl->sinfo_tsn is <= liste->tsn we can
2162 * process it which is the NOT of
2163 * ctl->sinfo_tsn > liste->tsn
2165 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2166 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2173 * Now service re-assembly to pick up anything that has been
2174 * held on reassembly queue?
2176 (void)sctp_deliver_reasm_check(stcb, asoc, strm);
2177 need_reasm_check = 0;
2179 if (need_reasm_check) {
2180 /* Another one waits ? */
2181 (void)sctp_deliver_reasm_check(stcb, asoc, strm);
/*
 * Lookup table indexed by a mapping-array byte value: entry [v] is the
 * number of consecutive 1-bits in v starting from bit 0 (the LSB), i.e.
 * how many in-sequence TSNs that byte contributes before the first gap.
 * E.g. [0x00]=0, [0x01]=1, [0x03]=2, [0x07]=3, [0xFF]=8.  Used by
 * sctp_slide_mapping_arrays() to advance the cumulative-ack point one
 * byte at a time (the all-ones byte is special-cased by the caller).
 */
2186 static const int8_t sctp_map_lookup_tab[256] = {
2187 0, 1, 0, 2, 0, 1, 0, 3,
2188 0, 1, 0, 2, 0, 1, 0, 4,
2189 0, 1, 0, 2, 0, 1, 0, 3,
2190 0, 1, 0, 2, 0, 1, 0, 5,
2191 0, 1, 0, 2, 0, 1, 0, 3,
2192 0, 1, 0, 2, 0, 1, 0, 4,
2193 0, 1, 0, 2, 0, 1, 0, 3,
2194 0, 1, 0, 2, 0, 1, 0, 6,
2195 0, 1, 0, 2, 0, 1, 0, 3,
2196 0, 1, 0, 2, 0, 1, 0, 4,
2197 0, 1, 0, 2, 0, 1, 0, 3,
2198 0, 1, 0, 2, 0, 1, 0, 5,
2199 0, 1, 0, 2, 0, 1, 0, 3,
2200 0, 1, 0, 2, 0, 1, 0, 4,
2201 0, 1, 0, 2, 0, 1, 0, 3,
2202 0, 1, 0, 2, 0, 1, 0, 7,
2203 0, 1, 0, 2, 0, 1, 0, 3,
2204 0, 1, 0, 2, 0, 1, 0, 4,
2205 0, 1, 0, 2, 0, 1, 0, 3,
2206 0, 1, 0, 2, 0, 1, 0, 5,
2207 0, 1, 0, 2, 0, 1, 0, 3,
2208 0, 1, 0, 2, 0, 1, 0, 4,
2209 0, 1, 0, 2, 0, 1, 0, 3,
2210 0, 1, 0, 2, 0, 1, 0, 6,
2211 0, 1, 0, 2, 0, 1, 0, 3,
2212 0, 1, 0, 2, 0, 1, 0, 4,
2213 0, 1, 0, 2, 0, 1, 0, 3,
2214 0, 1, 0, 2, 0, 1, 0, 5,
2215 0, 1, 0, 2, 0, 1, 0, 3,
2216 0, 1, 0, 2, 0, 1, 0, 4,
2217 0, 1, 0, 2, 0, 1, 0, 3,
2218 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() - recompute the association's cumulative TSN
 * from the received-TSN bitmaps and, when enough leading bytes are fully
 * acked, slide mapping_array/nr_mapping_array down so that
 * mapping_array_base_tsn advances.
 *
 * The cum-ack is derived from the OR of nr_mapping_array and mapping_array:
 * a fragmented message can be tracked in mapping_array only until it becomes
 * deliverable, so neither array alone gives the true cum-ack (see the
 * original comment below).
 *
 * NOTE(review): this extract elides some original source lines (braces,
 * #ifdef INVARIANTS alternatives, else-arms), so the visible control flow
 * is incomplete; comments below describe only what the visible lines show.
 */
2223 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2226 * Now we also need to check the mapping array in a couple of ways.
2227 * 1) Did we move the cum-ack point?
2229 * When you first glance at this you might think that all entries that
2230 * make up the position of the cum-ack would be in the nr-mapping
2231 * array only.. i.e. things up to the cum-ack are always
2232 * deliverable. Thats true with one exception, when its a fragmented
2233 * message we may not deliver the data until some threshold (or all
2234 * of it) is in place. So we must OR the nr_mapping_array and
2235 * mapping_array to get a true picture of the cum-ack.
2237 struct sctp_association *asoc;
2240 int slide_from, slide_end, lgap, distance;
2241 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for SCTP_MAP_PREPARE_SLIDE logging below. */
2245 old_cumack = asoc->cumulative_tsn;
2246 old_base = asoc->mapping_array_base_tsn;
2247 old_highest = asoc->highest_tsn_inside_map;
2249 * We could probably improve this a small bit by calculating the
2250 * offset of the current cum-ack as the starting point.
/*
 * Walk the OR-ed bitmap byte by byte; sctp_map_lookup_tab[] maps a byte
 * value to how many leading TSNs are counted before the first 0 bit.
 */
2253 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2254 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2258 /* there is a 0 bit */
2259 at += sctp_map_lookup_tab[val];
/* 'at' now counts contiguously-received TSNs starting at the base TSN. */
2263 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: the cum-ack can never exceed both highest-TSN trackers.
 * With INVARIANTS this panics; otherwise it logs, dumps the maps, and
 * clamps the trackers to the cum-ack (elided #ifdef arms not visible here).
 */
2265 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2266 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2268 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2269 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2271 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2272 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2273 sctp_print_mapping_array(asoc);
2274 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2275 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2277 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2278 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the two per-array highest-TSN trackers. */
2281 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2282 highest_tsn = asoc->highest_tsn_inside_nr_map;
2284 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything received is below the cum-ack and at least one
 * whole byte is covered -> just clear the arrays and reset the base.
 */
2286 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2287 /* The complete array was completed by a single FR */
2288 /* highest becomes the cum-ack */
2296 /* clear the array */
2297 clr = ((at + 7) >> 3);
2298 if (clr > asoc->mapping_array_size) {
2299 clr = asoc->mapping_array_size;
2301 memset(asoc->mapping_array, 0, clr);
2302 memset(asoc->nr_mapping_array, 0, clr);
/* Debug pass: both arrays must now be all-zero (elided #ifdef guard). */
2304 for (i = 0; i < asoc->mapping_array_size; i++) {
2305 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2306 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2307 sctp_print_mapping_array(asoc);
2311 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2312 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Case 2: at least one leading byte is fully acked -> slide the arrays
 * down by slide_from bytes (8 TSNs per byte).
 */
2313 } else if (at >= 8) {
2314 /* we can slide the mapping array down */
2315 /* slide_from holds where we hit the first NON 0xff byte */
2318 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn from the base; slide_end = its byte. */
2321 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2322 slide_end = (lgap >> 3);
2323 if (slide_end < slide_from) {
2324 sctp_print_mapping_array(asoc);
2326 panic("impossible slide");
2328 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2329 lgap, slide_end, slide_from, at);
2333 if (slide_end > asoc->mapping_array_size) {
2335 panic("would overrun buffer");
2337 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2338 asoc->mapping_array_size, slide_end);
2339 slide_end = asoc->mapping_array_size;
2342 distance = (slide_end - slide_from) + 1;
2343 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2344 sctp_log_map(old_base, old_cumack, old_highest,
2345 SCTP_MAP_PREPARE_SLIDE);
2346 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2347 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/*
 * If the live region would not fit after the move, skip the slide and
 * wait for more data to arrive (condition partially elided here).
 */
2349 if (distance + slide_from > asoc->mapping_array_size ||
2352 * Here we do NOT slide forward the array so that
2353 * hopefully when more data comes in to fill it up
2354 * we will be able to slide it forward. Really I
2355 * don't think this should happen :-0
2358 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2359 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2360 (uint32_t) asoc->mapping_array_size,
2361 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down to the front of both arrays ... */
2366 for (ii = 0; ii < distance; ii++) {
2367 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2368 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ... and zero the vacated tail. */
2371 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2372 asoc->mapping_array[ii] = 0;
2373 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "array is empty" invariant (highest == base - 1) intact for
 * either tracker by advancing it along with the base.
 */
2375 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2376 asoc->highest_tsn_inside_map += (slide_from << 3);
2378 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2379 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2381 asoc->mapping_array_base_tsn += (slide_from << 3);
2382 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2383 sctp_log_map(asoc->mapping_array_base_tsn,
2384 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2385 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() - decide whether to send a SACK immediately or to
 * (re)start the delayed-ack timer, based on association state, gap status,
 * duplicate TSNs, the sack frequency limit, and the CMT DAC algorithm.
 *
 * @was_a_gap: nonzero if a gap existed before the just-processed packet.
 *
 * NOTE(review): some original lines (braces, else-arms, an #ifdef around
 * the DAC counter) are elided from this extract.
 */
2392 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2394 struct sctp_association *asoc;
2395 uint32_t highest_tsn;
/* highest_tsn = max of the renegable and non-renegable trackers. */
2398 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2399 highest_tsn = asoc->highest_tsn_inside_nr_map;
2401 highest_tsn = asoc->highest_tsn_inside_map;
2405 * Now we need to see if we need to queue a sack or just start the
2406 * timer (if allowed).
2408 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2410 * Ok special case, in SHUTDOWN-SENT case. here we maker
2411 * sure SACK timer is off and instead send a SHUTDOWN and a
/* Stop any pending delayed-ack timer before the immediate SACK. */
2414 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2415 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2416 stcb->sctp_ep, stcb, NULL,
2417 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* Re-send SHUTDOWN (to the alternate net if one is set) plus a SACK. */
2419 sctp_send_shutdown(stcb,
2420 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2421 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2425 /* is there a gap now ? */
2426 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2429 * CMT DAC algorithm: increase number of packets received
2432 stcb->asoc.cmt_dac_pkts_rcvd++;
/*
 * Send a SACK now if any of the following hold; otherwise fall through
 * to the timer-start path at the bottom.
 */
2434 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2436 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2438 (stcb->asoc.numduptsns) || /* we have dup's */
2439 (is_a_gap) || /* is still a gap */
2440 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2441 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * DAC exception: with CMT+DAC enabled and nothing urgent pending
 * (no forced sack, no dups, delayed acks allowed, no timer running),
 * delay the ack even though a gap exists.
 */
2444 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2445 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2446 (stcb->asoc.send_sack == 0) &&
2447 (stcb->asoc.numduptsns == 0) &&
2448 (stcb->asoc.delayed_ack) &&
2449 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2452 * CMT DAC algorithm: With CMT, delay acks
2453 * even in the face of
2455 * reordering. Therefore, if acks that do not
2456 * have to be sent because of the above
2457 * reasons, will be delayed. That is, acks
2458 * that would have been sent due to gap
2459 * reports will be delayed with DAC. Start
2460 * the delayed ack timer.
2462 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2463 stcb->sctp_ep, stcb, NULL);
2466 * Ok we must build a SACK since the timer
2467 * is pending, we got our first packet OR
2468 * there are gaps or duplicates.
2470 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2471 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Nothing forced an immediate SACK: arm the delayed-ack timer if idle. */
2474 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2475 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2476 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_process_data() - walk every chunk in an inbound packet's DATA
 * region, dispatching DATA/I-DATA chunks to sctp_process_a_data_chunk()
 * and aborting or skipping other chunk types per their bit rules, then
 * trigger SACK handling via sctp_sack_check().
 *
 * @mm:      in/out pointer to the packet mbuf chain (may be replaced when
 *           the small-packet copy below swaps in a smaller mbuf).
 * @offset:  in/out byte offset of the first chunk; advanced per chunk.
 * @length:  total packet length used to bound chunk parsing.
 * @high_tsn: out: highest TSN seen (updated by the per-chunk handler).
 *
 * NOTE(review): many original lines (braces, else-arms, returns, #ifdefs)
 * are elided from this extract; comments describe the visible lines only.
 */
2483 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2484 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2485 struct sctp_nets *net, uint32_t * high_tsn)
2487 struct sctp_chunkhdr *ch, chunk_buf;
2488 struct sctp_association *asoc;
2489 int num_chunks = 0; /* number of control chunks processed */
2491 int chk_length, break_flag, last_chunk;
2492 int abort_flag = 0, was_a_gap;
2494 uint32_t highest_tsn;
2497 sctp_set_rwnd(stcb, &stcb->asoc);
2500 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap exists before processing this packet's chunks. */
2502 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2503 highest_tsn = asoc->highest_tsn_inside_nr_map;
2505 highest_tsn = asoc->highest_tsn_inside_map;
2507 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2509 * setup where we got the last DATA packet from for any SACK that
2510 * may need to go out. Don't bump the net. This is done ONLY when a
2511 * chunk is assigned.
2513 asoc->last_data_chunk_from = net;
2516 * Now before we proceed we must figure out if this is a wasted
2517 * cluster... i.e. it is a small packet sent in and yet the driver
2518 * underneath allocated a full cluster for it. If so we must copy it
2519 * to a smaller mbuf and free up the cluster mbuf. This will help
2520 * with cluster starvation. Note for __Panda__ we don't do this
2521 * since it has clusters all the way down to 64 bytes.
2523 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2524 /* we only handle mbufs that are singletons.. not chains */
2525 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2527 /* ok lets see if we can copy the data up */
2530 /* get the pointers and copy */
2531 to = mtod(m, caddr_t *);
2532 from = mtod((*mm), caddr_t *);
2533 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2534 /* copy the length and free up the old */
2535 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2537 /* success, back copy */
2540 /* We are in trouble in the mbuf world .. yikes */
2544 /* get pointer to the first chunk header */
2545 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2546 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2551 * process all DATA chunks...
2553 *high_tsn = asoc->cumulative_tsn;
2555 asoc->data_pkts_seen++;
2556 while (stop_proc == 0) {
2557 /* validate chunk length */
2558 chk_length = ntohs(ch->chunk_length);
2559 if (length - *offset < chk_length) {
2560 /* all done, mutulated chunk */
/*
 * DATA vs I-DATA must match what was negotiated for the association;
 * a mismatch is a protocol violation and aborts the association.
 */
2564 if ((asoc->idata_supported == 1) &&
2565 (ch->chunk_type == SCTP_DATA)) {
2566 struct mbuf *op_err;
2567 char msg[SCTP_DIAG_INFO_LEN];
2569 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2570 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2571 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2572 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2575 if ((asoc->idata_supported == 0) &&
2576 (ch->chunk_type == SCTP_IDATA)) {
2577 struct mbuf *op_err;
2578 char msg[SCTP_DIAG_INFO_LEN];
2580 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2581 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2582 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2583 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2586 if ((ch->chunk_type == SCTP_DATA) ||
2587 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum header size differs between DATA and I-DATA chunks. */
2590 if (ch->chunk_type == SCTP_DATA) {
2591 clen = sizeof(struct sctp_data_chunk);
2593 clen = sizeof(struct sctp_idata_chunk);
2595 if (chk_length < clen) {
2597 * Need to send an abort since we had a
2598 * invalid data chunk.
2600 struct mbuf *op_err;
2601 char msg[SCTP_DIAG_INFO_LEN];
2603 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2605 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2606 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2607 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2610 #ifdef SCTP_AUDITING_ENABLED
2611 sctp_audit_log(0xB1, 0);
/* last_chunk is presumably set when this chunk fills the packet. */
2613 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2618 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2619 chk_length, net, high_tsn, &abort_flag, &break_flag,
2620 last_chunk, ch->chunk_type)) {
2628 * Set because of out of rwnd space and no
2629 * drop rep space left.
2635 /* not a data chunk in the data region */
2636 switch (ch->chunk_type) {
2637 case SCTP_INITIATION:
2638 case SCTP_INITIATION_ACK:
2639 case SCTP_SELECTIVE_ACK:
2640 case SCTP_NR_SELECTIVE_ACK:
2641 case SCTP_HEARTBEAT_REQUEST:
2642 case SCTP_HEARTBEAT_ACK:
2643 case SCTP_ABORT_ASSOCIATION:
2645 case SCTP_SHUTDOWN_ACK:
2646 case SCTP_OPERATION_ERROR:
2647 case SCTP_COOKIE_ECHO:
2648 case SCTP_COOKIE_ACK:
2651 case SCTP_SHUTDOWN_COMPLETE:
2652 case SCTP_AUTHENTICATION:
2653 case SCTP_ASCONF_ACK:
2654 case SCTP_PACKET_DROPPED:
2655 case SCTP_STREAM_RESET:
2656 case SCTP_FORWARD_CUM_TSN:
2660 * Now, what do we do with KNOWN
2661 * chunks that are NOT in the right
2664 * For now, I do nothing but ignore
2665 * them. We may later want to add
2666 * sysctl stuff to switch out and do
2667 * either an ABORT() or possibly
2670 struct mbuf *op_err;
2671 char msg[SCTP_DIAG_INFO_LEN];
2673 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2675 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2676 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2680 /* unknown chunk type, use bit rules */
/* High bit 0x40: report the unrecognized chunk back to the peer. */
2681 if (ch->chunk_type & 0x40) {
2682 /* Add a error report to the queue */
2683 struct mbuf *op_err;
2684 struct sctp_gen_error_cause *cause;
2686 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2687 0, M_NOWAIT, 1, MT_DATA);
2688 if (op_err != NULL) {
2689 cause = mtod(op_err, struct sctp_gen_error_cause *);
2690 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2691 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2692 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2693 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2694 if (SCTP_BUF_NEXT(op_err) != NULL) {
2695 sctp_queue_op_err(stcb, op_err);
2697 sctp_m_freem(op_err);
/* High bit 0x80 clear: stop processing the rest of the packet. */
2701 if ((ch->chunk_type & 0x80) == 0) {
2702 /* discard the rest of this packet */
2704 } /* else skip this bad chunk and
2707 } /* switch of chunk type */
/* Advance to the next 4-byte-aligned chunk boundary. */
2709 *offset += SCTP_SIZE32(chk_length);
2710 if ((*offset >= length) || stop_proc) {
2711 /* no more data left in the mbuf chain */
2715 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2716 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2725 * we need to report rwnd overrun drops.
2727 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2731 * Did we get data, if so update the time for auto-close and
2732 * give peer credit for being alive.
2734 SCTP_STAT_INCR(sctps_recvpktwithdata);
2735 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2736 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2737 stcb->asoc.overall_error_count,
2739 SCTP_FROM_SCTP_INDATA,
/* Received data proves the peer is alive: reset the error counter. */
2742 stcb->asoc.overall_error_count = 0;
2743 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2745 /* now service all of the reassm queue if needed */
2746 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2747 /* Assure that we ack right away */
2748 stcb->asoc.send_sack = 1;
2750 /* Start a sack timer or QUEUE a SACK for sending */
2751 sctp_sack_check(stcb, was_a_gap);
/*
 * sctp_process_segment_range() - mark the sent-queue chunks covered by one
 * SACK gap-ack block [last_tsn+frag_strt, last_tsn+frag_end] as acked.
 * Updates flight size, cwnd-related per-destination pseudo-cumack state
 * (CMT CUCv2), SFR/HTNA newack tracking, RTO measurement, and - for
 * nr-sacked chunks - frees the chunk data.
 *
 * @p_tp1: in/out cursor into the sent queue, so consecutive gap blocks
 *         resume scanning where the previous one left off.
 * Returns nonzero (wake_him) when an nr-sack freed data, per the comment
 * at the bottom.
 *
 * NOTE(review): some original lines (braces, else-arms, parts of the RTO
 * path) are elided from this extract.
 */
2756 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2757 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2759 uint32_t * biggest_newly_acked_tsn,
2760 uint32_t * this_sack_lowest_newack,
2763 struct sctp_tmit_chunk *tp1;
2764 unsigned int theTSN;
2765 int j, wake_him = 0, circled = 0;
2767 /* Recover the tp1 we last saw */
2770 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Iterate every TSN offset covered by this gap-ack block. */
2772 for (j = frag_strt; j <= frag_end; j++) {
2773 theTSN = j + last_tsn;
2775 if (tp1->rec.data.doing_fast_retransmit)
2779 * CMT: CUCv2 algorithm. For each TSN being
2780 * processed from the sent queue, track the
2781 * next expected pseudo-cumack, or
2782 * rtx_pseudo_cumack, if required. Separate
2783 * cumack trackers for first transmissions,
2784 * and retransmissions.
2786 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2787 (tp1->whoTo->find_pseudo_cumack == 1) &&
2788 (tp1->snd_count == 1)) {
2789 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2790 tp1->whoTo->find_pseudo_cumack = 0;
2792 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2793 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2794 (tp1->snd_count > 1)) {
2795 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2796 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the sent-queue chunk matching this gap-acked TSN. */
2798 if (tp1->rec.data.TSN_seq == theTSN) {
2799 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2801 * must be held until
2804 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2806 * If it is less than RESEND, it is
2807 * now no-longer in flight.
2808 * Higher values may already be set
2809 * via previous Gap Ack Blocks...
2810 * i.e. ACKED or RESEND.
2812 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2813 *biggest_newly_acked_tsn)) {
2814 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2817 * CMT: SFR algo (and HTNA) - set
2818 * saw_newack to 1 for dest being
2819 * newly acked. update
2820 * this_sack_highest_newack if
/* Revoked chunks don't count as new acks for SFR. */
2823 if (tp1->rec.data.chunk_was_revoked == 0)
2824 tp1->whoTo->saw_newack = 1;
2826 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2827 tp1->whoTo->this_sack_highest_newack)) {
2828 tp1->whoTo->this_sack_highest_newack =
2829 tp1->rec.data.TSN_seq;
2832 * CMT DAC algo: also update
2833 * this_sack_lowest_newack
2835 if (*this_sack_lowest_newack == 0) {
2836 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2837 sctp_log_sack(*this_sack_lowest_newack,
2839 tp1->rec.data.TSN_seq,
2842 SCTP_LOG_TSN_ACKED);
2844 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2847 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2848 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2849 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2850 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2851 * Separate pseudo_cumack trackers for first transmissions and
2854 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2855 if (tp1->rec.data.chunk_was_revoked == 0) {
2856 tp1->whoTo->new_pseudo_cumack = 1;
2858 tp1->whoTo->find_pseudo_cumack = 1;
2860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2861 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2863 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2864 if (tp1->rec.data.chunk_was_revoked == 0) {
2865 tp1->whoTo->new_pseudo_cumack = 1;
2867 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2869 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2870 sctp_log_sack(*biggest_newly_acked_tsn,
2872 tp1->rec.data.TSN_seq,
2875 SCTP_LOG_TSN_ACKED);
2877 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2878 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2879 tp1->whoTo->flight_size,
2881 (uint32_t) (uintptr_t) tp1->whoTo,
2882 tp1->rec.data.TSN_seq);
/* The chunk leaves flight: shrink per-net and total flight sizes. */
2884 sctp_flight_size_decrease(tp1);
2885 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2886 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2889 sctp_total_flight_decrease(stcb, tp1);
2891 tp1->whoTo->net_ack += tp1->send_size;
2892 if (tp1->snd_count < 2) {
2894 * True non-retransmited chunk
2896 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample from a first-transmission ack (guards elided here). */
2904 sctp_calculate_rto(stcb,
2907 &tp1->sent_rcv_time,
2908 sctp_align_safe_nocopy,
2909 SCTP_RTT_FROM_DATA);
2912 if (tp1->whoTo->rto_needed == 0) {
2913 tp1->whoTo->rto_needed = 1;
2919 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2920 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2921 stcb->asoc.this_sack_highest_gap)) {
2922 stcb->asoc.this_sack_highest_gap =
2923 tp1->rec.data.TSN_seq;
2925 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2926 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2927 #ifdef SCTP_AUDITING_ENABLED
2928 sctp_audit_log(0xB2,
2929 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2934 * All chunks NOT UNSENT fall through here and are marked
2935 * (leave PR-SCTP ones that are to skip alone though)
2937 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2938 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2939 tp1->sent = SCTP_DATAGRAM_MARKED;
2941 if (tp1->rec.data.chunk_was_revoked) {
2942 /* deflate the cwnd */
2943 tp1->whoTo->cwnd -= tp1->book_size;
2944 tp1->rec.data.chunk_was_revoked = 0;
2946 /* NR Sack code here */
2948 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* Keep the per-stream queued-chunk accounting consistent. */
2949 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2950 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2953 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* An emptied stream with a pending reset triggers the reset now. */
2956 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2957 (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2958 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2959 stcb->asoc.trigger_reset = 1;
2961 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* nr-sacked data will never be renegued: release it immediately. */
2967 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2968 sctp_m_freem(tp1->data);
2975 } /* if (tp1->TSN_seq == theTSN) */
2976 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
/* Walk forward; wrap to the queue head once if blocks were unordered. */
2979 tp1 = TAILQ_NEXT(tp1, sctp_next);
2980 if ((tp1 == NULL) && (circled == 0)) {
2982 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2984 } /* end while (tp1) */
2987 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2989 /* In case the fragments were not in order we must reset */
2990 } /* end for (j = fragStart */
2992 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() - parse every gap-ack block in a (NR-)SACK chunk
 * from the mbuf chain and feed each one to sctp_process_segment_range().
 * Malformed blocks (start > end) are skipped; out-of-order blocks restart
 * the sent-queue scan from the head. Tracks the biggest TSN acked across
 * all blocks and returns nonzero when an nr-sack freed chunk data.
 *
 * NOTE(review): some original lines (braces, the non-revocable block
 * classification, early-exit paths) are elided from this extract.
 */
2997 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2998 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2999 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3000 int num_seg, int num_nr_seg, int *rto_ok)
3002 struct sctp_gap_ack_block *frag, block;
3003 struct sctp_tmit_chunk *tp1;
3008 uint16_t frag_strt, frag_end, prev_frag_end;
3010 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Regular gap-ack blocks come first, then the nr-sack blocks. */
3014 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3017 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3019 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3020 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3021 *offset += sizeof(block);
/* Truncated chunk: stop and return what was processed so far. */
3023 return (chunk_freed);
3025 frag_strt = ntohs(frag->start);
3026 frag_end = ntohs(frag->end);
3028 if (frag_strt > frag_end) {
3029 /* This gap report is malformed, skip it. */
3032 if (frag_strt <= prev_frag_end) {
3033 /* This gap report is not in order, so restart. */
3034 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3036 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3037 *biggest_tsn_acked = last_tsn + frag_end;
3044 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3045 non_revocable, &num_frs, biggest_newly_acked_tsn,
3046 this_sack_lowest_newack, rto_ok)) {
3049 prev_frag_end = frag_end;
3051 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3053 sctp_log_fr(*biggest_tsn_acked,
3054 *biggest_newly_acked_tsn,
3055 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3057 return (chunk_freed);
/*
 * sctp_check_for_revoked() - after processing a SACK, walk the sent queue
 * between the cum-ack and the biggest TSN this SACK acked and detect
 * renegued (revoked) chunks: chunks previously ACKED that this SACK no
 * longer covers go back to SENT (re-entering flight, with the cwnd
 * inflated to compensate), while chunks re-acked by this SACK are
 * promoted from MARKED to ACKED.
 *
 * NOTE(review): some original lines (braces, loop-exit conditions) are
 * elided from this extract.
 */
3061 sctp_check_for_revoked(struct sctp_tcb *stcb,
3062 struct sctp_association *asoc, uint32_t cumack,
3063 uint32_t biggest_tsn_acked)
3065 struct sctp_tmit_chunk *tp1;
3067 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
/* Only TSNs above the cum-ack can be revoked. */
3068 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3070 * ok this guy is either ACK or MARKED. If it is
3071 * ACKED it has been previously acked but not this
3072 * time i.e. revoked. If it is MARKED it was ACK'ed
3075 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3078 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3079 /* it has been revoked */
3080 tp1->sent = SCTP_DATAGRAM_SENT;
3081 tp1->rec.data.chunk_was_revoked = 1;
3083 * We must add this stuff back in to assure
3084 * timers and such get started.
3086 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3087 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3088 tp1->whoTo->flight_size,
3090 (uint32_t) (uintptr_t) tp1->whoTo,
3091 tp1->rec.data.TSN_seq);
/* The revoked chunk counts as in-flight again. */
3093 sctp_flight_size_increase(tp1);
3094 sctp_total_flight_increase(stcb, tp1);
3096 * We inflate the cwnd to compensate for our
3097 * artificial inflation of the flight_size.
3099 tp1->whoTo->cwnd += tp1->book_size;
3100 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3101 sctp_log_sack(asoc->last_acked_seq,
3103 tp1->rec.data.TSN_seq,
3106 SCTP_LOG_TSN_REVOKED);
3108 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3109 /* it has been re-acked in this SACK */
3110 tp1->sent = SCTP_DATAGRAM_ACKED;
/* UNSENT chunks mark the end of useful scanning (exit path elided). */
3113 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3120 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3121 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3123 struct sctp_tmit_chunk *tp1;
3124 int strike_flag = 0;
3126 int tot_retrans = 0;
3127 uint32_t sending_seq;
3128 struct sctp_nets *net;
3129 int num_dests_sacked = 0;
3132 * select the sending_seq, this is either the next thing ready to be
3133 * sent but not transmitted, OR, the next seq we assign.
3135 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3137 sending_seq = asoc->sending_seq;
3139 sending_seq = tp1->rec.data.TSN_seq;
3142 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3143 if ((asoc->sctp_cmt_on_off > 0) &&
3144 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3145 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3146 if (net->saw_newack)
3150 if (stcb->asoc.prsctp_supported) {
3151 (void)SCTP_GETTIME_TIMEVAL(&now);
3153 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3155 if (tp1->no_fr_allowed) {
3156 /* this one had a timeout or something */
3159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3160 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3161 sctp_log_fr(biggest_tsn_newly_acked,
3162 tp1->rec.data.TSN_seq,
3164 SCTP_FR_LOG_CHECK_STRIKE);
3166 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3167 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3171 if (stcb->asoc.prsctp_supported) {
3172 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3173 /* Is it expired? */
3174 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3175 /* Yes so drop it */
3176 if (tp1->data != NULL) {
3177 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3178 SCTP_SO_NOT_LOCKED);
3184 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3185 /* we are beyond the tsn in the sack */
3188 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3189 /* either a RESEND, ACKED, or MARKED */
3191 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3192 /* Continue strikin FWD-TSN chunks */
3193 tp1->rec.data.fwd_tsn_cnt++;
3198 * CMT : SFR algo (covers part of DAC and HTNA as well)
3200 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3202 * No new acks were receieved for data sent to this
3203 * dest. Therefore, according to the SFR algo for
3204 * CMT, no data sent to this dest can be marked for
3205 * FR using this SACK.
3208 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3209 tp1->whoTo->this_sack_highest_newack)) {
3211 * CMT: New acks were receieved for data sent to
3212 * this dest. But no new acks were seen for data
3213 * sent after tp1. Therefore, according to the SFR
3214 * algo for CMT, tp1 cannot be marked for FR using
3215 * this SACK. This step covers part of the DAC algo
3216 * and the HTNA algo as well.
3221 * Here we check to see if we were have already done a FR
3222 * and if so we see if the biggest TSN we saw in the sack is
3223 * smaller than the recovery point. If so we don't strike
3224 * the tsn... otherwise we CAN strike the TSN.
3227 * @@@ JRI: Check for CMT if (accum_moved &&
3228 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3231 if (accum_moved && asoc->fast_retran_loss_recovery) {
3233 * Strike the TSN if in fast-recovery and cum-ack
3236 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3237 sctp_log_fr(biggest_tsn_newly_acked,
3238 tp1->rec.data.TSN_seq,
3240 SCTP_FR_LOG_STRIKE_CHUNK);
3242 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3245 if ((asoc->sctp_cmt_on_off > 0) &&
3246 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3248 * CMT DAC algorithm: If SACK flag is set to
3249 * 0, then lowest_newack test will not pass
3250 * because it would have been set to the
3251 * cumack earlier. If not already to be
3252 * rtx'd, If not a mixed sack and if tp1 is
3253 * not between two sacked TSNs, then mark by
3254 * one more. NOTE that we are marking by one
3255 * additional time since the SACK DAC flag
3256 * indicates that two packets have been
3257 * received after this missing TSN.
3259 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3260 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3261 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3262 sctp_log_fr(16 + num_dests_sacked,
3263 tp1->rec.data.TSN_seq,
3265 SCTP_FR_LOG_STRIKE_CHUNK);
3270 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3271 (asoc->sctp_cmt_on_off == 0)) {
3273 * For those that have done a FR we must take
3274 * special consideration if we strike. I.e the
3275 * biggest_newly_acked must be higher than the
3276 * sending_seq at the time we did the FR.
3279 #ifdef SCTP_FR_TO_ALTERNATE
3281 * If FR's go to new networks, then we must only do
3282 * this for singly homed asoc's. However if the FR's
3283 * go to the same network (Armando's work) then its
3284 * ok to FR multiple times.
3292 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3293 tp1->rec.data.fast_retran_tsn)) {
3295 * Strike the TSN, since this ack is
3296 * beyond where things were when we
3299 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3300 sctp_log_fr(biggest_tsn_newly_acked,
3301 tp1->rec.data.TSN_seq,
3303 SCTP_FR_LOG_STRIKE_CHUNK);
3305 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3309 if ((asoc->sctp_cmt_on_off > 0) &&
3310 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3312 * CMT DAC algorithm: If
3313 * SACK flag is set to 0,
3314 * then lowest_newack test
3315 * will not pass because it
3316 * would have been set to
3317 * the cumack earlier. If
3318 * not already to be rtx'd,
3319 * If not a mixed sack and
3320 * if tp1 is not between two
3321 * sacked TSNs, then mark by
3322 * one more. NOTE that we
3323 * are marking by one
3324 * additional time since the
3325 * SACK DAC flag indicates
3326 * that two packets have
3327 * been received after this
3330 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3331 (num_dests_sacked == 1) &&
3332 SCTP_TSN_GT(this_sack_lowest_newack,
3333 tp1->rec.data.TSN_seq)) {
3334 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3335 sctp_log_fr(32 + num_dests_sacked,
3336 tp1->rec.data.TSN_seq,
3338 SCTP_FR_LOG_STRIKE_CHUNK);
3340 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3348 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3351 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3352 biggest_tsn_newly_acked)) {
3354 * We don't strike these: This is the HTNA
3355 * algorithm i.e. we don't strike If our TSN is
3356 * larger than the Highest TSN Newly Acked.
3360 /* Strike the TSN */
3361 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3362 sctp_log_fr(biggest_tsn_newly_acked,
3363 tp1->rec.data.TSN_seq,
3365 SCTP_FR_LOG_STRIKE_CHUNK);
3367 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3370 if ((asoc->sctp_cmt_on_off > 0) &&
3371 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3373 * CMT DAC algorithm: If SACK flag is set to
3374 * 0, then lowest_newack test will not pass
3375 * because it would have been set to the
3376 * cumack earlier. If not already to be
3377 * rtx'd, If not a mixed sack and if tp1 is
3378 * not between two sacked TSNs, then mark by
3379 * one more. NOTE that we are marking by one
3380 * additional time since the SACK DAC flag
3381 * indicates that two packets have been
3382 * received after this missing TSN.
3384 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3385 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3386 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3387 sctp_log_fr(48 + num_dests_sacked,
3388 tp1->rec.data.TSN_seq,
3390 SCTP_FR_LOG_STRIKE_CHUNK);
3396 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3397 struct sctp_nets *alt;
3399 /* fix counts and things */
3400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3401 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3402 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3404 (uint32_t) (uintptr_t) tp1->whoTo,
3405 tp1->rec.data.TSN_seq);
3408 tp1->whoTo->net_ack++;
3409 sctp_flight_size_decrease(tp1);
3410 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3411 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3415 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3416 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3417 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3419 /* add back to the rwnd */
3420 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3422 /* remove from the total flight */
3423 sctp_total_flight_decrease(stcb, tp1);
3425 if ((stcb->asoc.prsctp_supported) &&
3426 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3428 * Has it been retransmitted tv_sec times? -
3429 * we store the retran count there.
3431 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3432 /* Yes, so drop it */
3433 if (tp1->data != NULL) {
3434 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3435 SCTP_SO_NOT_LOCKED);
3437 /* Make sure to flag we had a FR */
3438 tp1->whoTo->net_ack++;
3443 * SCTP_PRINTF("OK, we are now ready to FR this
3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3447 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3451 /* This is a subsequent FR */
3452 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3454 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3455 if (asoc->sctp_cmt_on_off > 0) {
3457 * CMT: Using RTX_SSTHRESH policy for CMT.
3458 * If CMT is being used, then pick dest with
3459 * largest ssthresh for any retransmission.
3461 tp1->no_fr_allowed = 1;
3463 /* sa_ignore NO_NULL_CHK */
3464 if (asoc->sctp_cmt_pf > 0) {
3466 * JRS 5/18/07 - If CMT PF is on,
3467 * use the PF version of
3470 alt = sctp_find_alternate_net(stcb, alt, 2);
3473 * JRS 5/18/07 - If only CMT is on,
3474 * use the CMT version of
3477 /* sa_ignore NO_NULL_CHK */
3478 alt = sctp_find_alternate_net(stcb, alt, 1);
3484 * CUCv2: If a different dest is picked for
3485 * the retransmission, then new
3486 * (rtx-)pseudo_cumack needs to be tracked
3487 * for orig dest. Let CUCv2 track new (rtx-)
3488 * pseudo-cumack always.
3491 tp1->whoTo->find_pseudo_cumack = 1;
3492 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3494 } else {/* CMT is OFF */
3496 #ifdef SCTP_FR_TO_ALTERNATE
3497 /* Can we find an alternate? */
3498 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3501 * default behavior is to NOT retransmit
3502 * FR's to an alternate. Armando Caro's
3503 * paper details why.
3509 tp1->rec.data.doing_fast_retransmit = 1;
3511 /* mark the sending seq for possible subsequent FR's */
3513 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3514 * (uint32_t)tpi->rec.data.TSN_seq);
3516 if (TAILQ_EMPTY(&asoc->send_queue)) {
3518 * If the queue of send is empty then its
3519 * the next sequence number that will be
3520 * assigned so we subtract one from this to
3521 * get the one we last sent.
3523 tp1->rec.data.fast_retran_tsn = sending_seq;
3526 * If there are chunks on the send queue
3527 * (unsent data that has made it from the
3528 * stream queues but not out the door, we
3529 * take the first one (which will have the
3530 * lowest TSN) and subtract one to get the
3533 struct sctp_tmit_chunk *ttt;
3535 ttt = TAILQ_FIRST(&asoc->send_queue);
3536 tp1->rec.data.fast_retran_tsn =
3537 ttt->rec.data.TSN_seq;
3542 * this guy had a RTO calculation pending on
3545 if ((tp1->whoTo != NULL) &&
3546 (tp1->whoTo->rto_needed == 0)) {
3547 tp1->whoTo->rto_needed = 1;
3551 if (alt != tp1->whoTo) {
3552 /* yes, there is an alternate. */
3553 sctp_free_remote_addr(tp1->whoTo);
3554 /* sa_ignore FREED_MEMORY */
3556 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP (RFC 3758): try to advance the peer's Advanced.Peer.Ack.Point.
 * Walks the sent_queue over chunks whose ->sent state is
 * SCTP_FORWARD_TSN_SKIP, SCTP_DATAGRAM_RESEND or SCTP_DATAGRAM_NR_ACKED,
 * drops TTL-expired PR-SCTP resends, and pushes
 * asoc->advanced_peer_ack_point forward over skippable TSNs.
 * Presumably returns the last advanced chunk (a_adv) for the caller's
 * FORWARD-TSN handling -- the return statements are elided here, TODO
 * confirm against the full file.
 *
 * NOTE(review): this is an elided listing -- the leading numbers are the
 * original file's line numbers and gaps mean lines (braces, declarations,
 * returns) were dropped by extraction.  Comments describe only what the
 * visible lines establish.
 */
3562 struct sctp_tmit_chunk *
3563 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3564 struct sctp_association *asoc)
3566 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to advance if the peer never negotiated PR-SCTP. */
3570 if (asoc->prsctp_supported == 0) {
3573 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3574 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3575 tp1->sent != SCTP_DATAGRAM_RESEND &&
3576 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3577 /* no chance to advance, out of here */
3580 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3581 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3582 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3583 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3584 asoc->advanced_peer_ack_point,
3585 tp1->rec.data.TSN_seq, 0, 0);
3588 if (!PR_SCTP_ENABLED(tp1->flags)) {
3590 * We can't fwd-tsn past any that are reliable aka
3591 * retransmitted until the asoc fails.
/* Snapshot current time for the TTL-expiry comparison below. */
3596 (void)SCTP_GETTIME_TIMEVAL(&now);
3600 * now we got a chunk which is marked for another
3601 * retransmission to a PR-stream but has run out its chances
3602 * already maybe OR has been marked to skip now. Can we skip
3603 * it if its a resend?
3605 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3606 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3608 * Now is this one marked for resend and its time is
3611 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3612 /* Yes so drop it */
3614 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3615 1, SCTP_SO_NOT_LOCKED);
3619 * No, we are done when hit one for resend
3620 * whos time as not expired.
3626 * Ok now if this chunk is marked to drop it we can clean up
3627 * the chunk, advance our peer ack point and we can check
3630 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3631 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3632 /* advance PeerAckPoint goes forward */
3633 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3634 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3636 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3637 /* No update but we do save the chk */
3642 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping.  Classifies every
 * chunk on the sent_queue by its ->sent state into inflight / resend /
 * inbetween / acked / above counters and reports a mismatch against the
 * entry_flight/entry_cnt snapshot via SCTP_PRINTF (or panic -- the
 * surrounding #ifdef lines appear elided from this listing).  Callers in
 * this file (see sctp_express_handle_sack) use the return value in a
 * boolean context; the return statements and return type line are elided
 * here -- TODO confirm against the full file.
 *
 * NOTE(review): elided listing; the leading numbers are original file
 * line numbers and gaps mark dropped lines.
 */
3652 sctp_fs_audit(struct sctp_association *asoc)
3654 struct sctp_tmit_chunk *chk;
3655 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3659 int entry_flight, entry_cnt;
/* Snapshot the accounted totals before walking the queue. */
3665 entry_flight = asoc->total_flight;
3666 entry_cnt = asoc->total_flight_count;
/* All-PR-SCTP case: nothing meaningful to audit (early exit elided). */
3668 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3671 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3672 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3673 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3674 chk->rec.data.TSN_seq,
3678 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3680 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3682 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Any chunk still counted as in flight (or in between) is an error. */
3689 if ((inflight > 0) || (inbetween > 0)) {
3691 panic("Flight size-express incorrect? \n");
3693 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3694 entry_flight, entry_cnt);
3696 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3697 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a window probe once the peer's rwnd
 * has reopened: clear its window_probe flag and -- unless it is already
 * acked or its data is gone (in which case we only log and leave it
 * alone) -- remove it from the flight-size accounting and mark it
 * SCTP_DATAGRAM_RESEND so it will be retransmitted normally.  Return
 * type line is elided from this listing (presumably static void -- TODO
 * confirm).
 *
 * NOTE(review): elided listing; leading numbers are original file line
 * numbers, gaps are dropped lines (braces / early return).
 */
3706 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3707 struct sctp_association *asoc,
3708 struct sctp_tmit_chunk *tp1)
3710 tp1->window_probe = 0;
3711 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3712 /* TSN's skipped we do NOT move back. */
3713 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3714 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3716 (uint32_t) (uintptr_t) tp1->whoTo,
3717 tp1->rec.data.TSN_seq);
3720 /* First setup this by shrinking flight */
3721 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3722 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3725 sctp_flight_size_decrease(tp1);
3726 sctp_total_flight_decrease(stcb, tp1);
3727 /* Now mark for resend */
3728 tp1->sent = SCTP_DATAGRAM_RESEND;
/* Keep the retransmit count in sync with the new RESEND marking. */
3729 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3731 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3732 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3733 tp1->whoTo->flight_size,
3735 (uint32_t) (uintptr_t) tp1->whoTo,
3736 tp1->rec.data.TSN_seq);
/*
 * Express (fast-path) SACK handling: process a SACK that carries only a
 * cumulative ack ("cumack") and no gap-ack blocks.  Visible work:
 * validate the cumack against the highest TSN actually sent (aborting
 * the association on a protocol violation), free every newly cum-acked
 * chunk and update per-net RTO/CC/flight state, refresh the peer rwnd
 * (with sender-side SWS avoidance), perform window-probe recovery,
 * manage T3 retransmission timers, progress the shutdown state machine
 * when the queues drain, and run the PR-SCTP advanced-peer-ack-point /
 * FORWARD-TSN procedures.  Requires the TCB lock (asserted below);
 * *abort_now is presumably set on the abort paths -- TODO confirm, the
 * assignments are elided.
 *
 * NOTE(review): elided listing -- the leading numbers are the original
 * file's line numbers; numbering gaps mean lines (braces, declarations,
 * #endif, returns) were dropped by extraction.  Comments are limited to
 * what the visible lines establish.
 */
3741 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3742 uint32_t rwnd, int *abort_now, int ecne_seen)
3744 struct sctp_nets *net;
3745 struct sctp_association *asoc;
3746 struct sctp_tmit_chunk *tp1, *tp2;
3748 int win_probe_recovery = 0;
3749 int win_probe_recovered = 0;
3750 int j, done_once = 0;
3754 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3755 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3756 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3758 SCTP_TCB_LOCK_ASSERT(stcb);
3759 #ifdef SCTP_ASOCLOG_OF_TSNS
3760 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3761 stcb->asoc.cumack_log_at++;
3762 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3763 stcb->asoc.cumack_log_at = 0;
3767 old_rwnd = asoc->peers_rwnd;
/*
 * Range check: a cumack behind last_acked_seq is an old SACK (its
 * discard path is elided); an equal cumack is only a window update.
 */
3768 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3771 } else if (asoc->last_acked_seq == cumack) {
3772 /* Window update sack */
3773 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3774 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3775 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3776 /* SWS sender side engages */
3777 asoc->peers_rwnd = 0;
3779 if (asoc->peers_rwnd > old_rwnd) {
3784 /* First setup for CC stuff */
3785 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3786 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3787 /* Drag along the window_tsn for cwr's */
3788 net->cwr_window_tsn = cumack;
3790 net->prev_cwnd = net->cwnd;
3795 * CMT: Reset CUC and Fast recovery algo variables before
3798 net->new_pseudo_cumack = 0;
3799 net->will_exit_fast_recovery = 0;
3800 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3801 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* send_s = one past the highest TSN we have ever put on the wire. */
3804 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3805 tp1 = TAILQ_LAST(&asoc->sent_queue,
3806 sctpchunk_listhead);
3807 send_s = tp1->rec.data.TSN_seq + 1;
3809 send_s = asoc->sending_seq;
/* Peer acked something we never sent: protocol violation, abort. */
3811 if (SCTP_TSN_GE(cumack, send_s)) {
3812 struct mbuf *op_err;
3813 char msg[SCTP_DIAG_INFO_LEN];
3817 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3819 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3820 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3821 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3824 asoc->this_sack_highest_gap = cumack;
3825 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3826 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3827 stcb->asoc.overall_error_count,
3829 SCTP_FROM_SCTP_INDATA,
/* A valid cumack is proof of life: clear the association error count. */
3832 stcb->asoc.overall_error_count = 0;
3833 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3834 /* process the new consecutive TSN first */
3835 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3836 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3837 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3838 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3840 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3842 * If it is less than ACKED, it is
3843 * now no-longer in flight. Higher
3844 * values may occur during marking
3846 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3848 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3849 tp1->whoTo->flight_size,
3851 (uint32_t) (uintptr_t) tp1->whoTo,
3852 tp1->rec.data.TSN_seq);
3854 sctp_flight_size_decrease(tp1);
3855 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3856 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3859 /* sa_ignore NO_NULL_CHK */
3860 sctp_total_flight_decrease(stcb, tp1);
3862 tp1->whoTo->net_ack += tp1->send_size;
3863 if (tp1->snd_count < 2) {
3865 * True non-retransmited
3868 tp1->whoTo->net_ack2 +=
3871 /* update RTO too? */
3880 sctp_calculate_rto(stcb,
3882 &tp1->sent_rcv_time,
3883 sctp_align_safe_nocopy,
3884 SCTP_RTT_FROM_DATA);
3887 if (tp1->whoTo->rto_needed == 0) {
3888 tp1->whoTo->rto_needed = 1;
3894 * CMT: CUCv2 algorithm. From the
3895 * cumack'd TSNs, for each TSN being
3896 * acked for the first time, set the
3897 * following variables for the
3898 * corresp destination.
3899 * new_pseudo_cumack will trigger a
3901 * find_(rtx_)pseudo_cumack will
3902 * trigger search for the next
3903 * expected (rtx-)pseudo-cumack.
3905 tp1->whoTo->new_pseudo_cumack = 1;
3906 tp1->whoTo->find_pseudo_cumack = 1;
3907 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3909 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3910 /* sa_ignore NO_NULL_CHK */
3911 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3914 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3915 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3917 if (tp1->rec.data.chunk_was_revoked) {
3918 /* deflate the cwnd */
3919 tp1->whoTo->cwnd -= tp1->book_size;
3920 tp1->rec.data.chunk_was_revoked = 0;
/* Per-stream accounting; a drained stream may trigger a pending reset. */
3922 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3923 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3924 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3927 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3931 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3932 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3933 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3934 asoc->trigger_reset = 1;
3936 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3938 /* sa_ignore NO_NULL_CHK */
3939 sctp_free_bufspace(stcb, asoc, tp1, 1);
3940 sctp_m_freem(tp1->data);
3943 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3944 sctp_log_sack(asoc->last_acked_seq,
3946 tp1->rec.data.TSN_seq,
3949 SCTP_LOG_FREE_SENT);
3951 asoc->sent_queue_cnt--;
3952 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/*
 * Freed send-queue space: wake any blocked writer.  On Apple /
 * SO_LOCK_TESTING builds the socket lock must be taken, which requires
 * dropping and re-taking the TCB lock around it (refcnt held across the
 * gap; bail out if the association was closed while unlocked).
 */
3959 /* sa_ignore NO_NULL_CHK */
3960 if (stcb->sctp_socket) {
3961 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3965 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3966 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3967 /* sa_ignore NO_NULL_CHK */
3968 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3970 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3971 so = SCTP_INP_SO(stcb->sctp_ep);
3972 atomic_add_int(&stcb->asoc.refcnt, 1);
3973 SCTP_TCB_UNLOCK(stcb);
3974 SCTP_SOCKET_LOCK(so, 1);
3975 SCTP_TCB_LOCK(stcb);
3976 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3977 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3978 /* assoc was freed while we were unlocked */
3979 SCTP_SOCKET_UNLOCK(so, 1);
3983 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3984 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3985 SCTP_SOCKET_UNLOCK(so, 1);
3988 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3989 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3993 /* JRS - Use the congestion control given in the CC module */
3994 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3995 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3996 if (net->net_ack2 > 0) {
3998 * Karn's rule applies to clearing error
3999 * count, this is optional.
4001 net->error_count = 0;
4002 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4003 /* addr came good */
4004 net->dest_state |= SCTP_ADDR_REACHABLE;
4005 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4006 0, (void *)net, SCTP_SO_NOT_LOCKED);
4008 if (net == stcb->asoc.primary_destination) {
4009 if (stcb->asoc.alternate) {
4011 * release the alternate,
4014 sctp_free_remote_addr(stcb->asoc.alternate);
4015 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart HB, let CC adjust. */
4018 if (net->dest_state & SCTP_ADDR_PF) {
4019 net->dest_state &= ~SCTP_ADDR_PF;
4020 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4021 stcb->sctp_ep, stcb, net,
4022 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4023 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4024 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4025 /* Done with this net */
4028 /* restore any doubled timers */
4029 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4030 if (net->RTO < stcb->asoc.minrto) {
4031 net->RTO = stcb->asoc.minrto;
4033 if (net->RTO > stcb->asoc.maxrto) {
4034 net->RTO = stcb->asoc.maxrto;
4038 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4040 asoc->last_acked_seq = cumack;
4042 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4043 /* nothing left in-flight */
4044 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4045 net->flight_size = 0;
4046 net->partial_bytes_acked = 0;
4048 asoc->total_flight = 0;
4049 asoc->total_flight_count = 0;
/* Recompute peer rwnd against outstanding data, with SWS avoidance. */
4052 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4053 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4054 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4055 /* SWS sender side engages */
4056 asoc->peers_rwnd = 0;
4058 if (asoc->peers_rwnd > old_rwnd) {
4059 win_probe_recovery = 1;
4061 /* Now assure a timer where data is queued at */
4064 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4067 if (win_probe_recovery && (net->window_probe)) {
4068 win_probe_recovered = 1;
4070 * Find first chunk that was used with window probe
4071 * and clear the sent
4073 /* sa_ignore FREED_MEMORY */
4074 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4075 if (tp1->window_probe) {
4076 /* move back to data send queue */
4077 sctp_window_probe_recovery(stcb, asoc, tp1);
4082 if (net->RTO == 0) {
4083 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4085 to_ticks = MSEC_TO_TICKS(net->RTO);
4087 if (net->flight_size) {
4089 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4090 sctp_timeout_handler, &net->rxt_timer);
4091 if (net->window_probe) {
4092 net->window_probe = 0;
4095 if (net->window_probe) {
4097 * In window probes we must assure a timer
4098 * is still running there
4100 net->window_probe = 0;
4101 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4102 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4103 sctp_timeout_handler, &net->rxt_timer);
4105 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4106 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4108 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * Consistency resync (condition head partly elided): if flight should
 * be empty but is not, audit and rebuild the flight-size counters from
 * the sent_queue.
 */
4113 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4114 (asoc->sent_queue_retran_cnt == 0) &&
4115 (win_probe_recovered == 0) &&
4118 * huh, this should not happen unless all packets are
4119 * PR-SCTP and marked to skip of course.
4121 if (sctp_fs_audit(asoc)) {
4122 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4123 net->flight_size = 0;
4125 asoc->total_flight = 0;
4126 asoc->total_flight_count = 0;
4127 asoc->sent_queue_retran_cnt = 0;
4128 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4129 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4130 sctp_flight_size_increase(tp1);
4131 sctp_total_flight_increase(stcb, tp1);
4132 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4133 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4140 /**********************************/
4141 /* Now what about shutdown issues */
4142 /**********************************/
4143 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4144 /* nothing left on sendqueue.. consider done */
4146 if ((asoc->stream_queue_cnt == 1) &&
4147 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4148 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4149 (asoc->locked_on_sending)
4151 struct sctp_stream_queue_pending *sp;
4154 * I may be in a state where we got all across.. but
4155 * cannot write more due to a shutdown... we abort
4156 * since the user did not indicate EOR in this case.
4157 * The sp will be cleaned during free of the asoc.
4159 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4161 if ((sp) && (sp->length == 0)) {
4162 /* Let cleanup code purge it */
4163 if (sp->msg_is_complete) {
4164 asoc->stream_queue_cnt--;
4166 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4167 asoc->locked_on_sending = NULL;
4168 asoc->stream_queue_cnt--;
/* SHUTDOWN-PENDING and all stream queues drained: send SHUTDOWN. */
4172 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4173 (asoc->stream_queue_cnt == 0)) {
4174 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4175 /* Need to abort here */
4176 struct mbuf *op_err;
4181 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4182 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4183 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4186 struct sctp_nets *netp;
4188 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4189 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4190 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4192 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4193 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4194 sctp_stop_timers_for_shutdown(stcb);
4195 if (asoc->alternate) {
4196 netp = asoc->alternate;
4198 netp = asoc->primary_destination;
4200 sctp_send_shutdown(stcb, netp);
4201 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4202 stcb->sctp_ep, stcb, netp);
4203 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4204 stcb->sctp_ep, stcb, netp);
/* SHUTDOWN-RECEIVED and drained: answer with SHUTDOWN-ACK. */
4206 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4207 (asoc->stream_queue_cnt == 0)) {
4208 struct sctp_nets *netp;
4210 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4213 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4214 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4215 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4216 sctp_stop_timers_for_shutdown(stcb);
4217 if (asoc->alternate) {
4218 netp = asoc->alternate;
4220 netp = asoc->primary_destination;
4222 sctp_send_shutdown_ack(stcb, netp);
4223 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4224 stcb->sctp_ep, stcb, netp);
4227 /*********************************************/
4228 /* Here we perform PR-SCTP procedures */
4230 /*********************************************/
4231 /* C1. update advancedPeerAckPoint */
4232 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4233 asoc->advanced_peer_ack_point = cumack;
4235 /* PR-Sctp issues need to be addressed too */
4236 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4237 struct sctp_tmit_chunk *lchk;
4238 uint32_t old_adv_peer_ack_point;
4240 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4241 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4242 /* C3. See if we need to send a Fwd-TSN */
4243 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4245 * ISSUE with ECN, see FWD-TSN processing.
4247 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4248 send_forward_tsn(stcb, asoc);
4250 /* try to FR fwd-tsn's that get lost too */
4251 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4252 send_forward_tsn(stcb, asoc);
4257 /* Assure a timer is up */
4258 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4259 stcb->sctp_ep, stcb, lchk->whoTo);
4262 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4263 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4265 stcb->asoc.peers_rwnd,
4266 stcb->asoc.total_flight,
4267 stcb->asoc.total_output_queue_size);
4272 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4273 struct sctp_tcb *stcb,
4274 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4275 int *abort_now, uint8_t flags,
4276 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4278 struct sctp_association *asoc;
4279 struct sctp_tmit_chunk *tp1, *tp2;
4280 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4281 uint16_t wake_him = 0;
4282 uint32_t send_s = 0;
4284 int accum_moved = 0;
4285 int will_exit_fast_recovery = 0;
4286 uint32_t a_rwnd, old_rwnd;
4287 int win_probe_recovery = 0;
4288 int win_probe_recovered = 0;
4289 struct sctp_nets *net = NULL;
4292 uint8_t reneged_all = 0;
4293 uint8_t cmt_dac_flag;
4296 * we take any chance we can to service our queues since we cannot
4297 * get awoken when the socket is read from :<
4300 * Now perform the actual SACK handling: 1) Verify that it is not an
4301 * old sack, if so discard. 2) If there is nothing left in the send
4302 * queue (cum-ack is equal to last acked) then you have a duplicate
4303 * too, update any rwnd change and verify no timers are running.
4304 * then return. 3) Process any new consequtive data i.e. cum-ack
4305 * moved process these first and note that it moved. 4) Process any
4306 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4307 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4308 * sync up flightsizes and things, stop all timers and also check
4309 * for shutdown_pending state. If so then go ahead and send off the
4310 * shutdown. If in shutdown recv, send off the shutdown-ack and
4311 * start that timer, Ret. 9) Strike any non-acked things and do FR
4312 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4313 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4314 * if in shutdown_recv state.
4316 SCTP_TCB_LOCK_ASSERT(stcb);
4318 this_sack_lowest_newack = 0;
4319 SCTP_STAT_INCR(sctps_slowpath_sack);
4321 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4322 #ifdef SCTP_ASOCLOG_OF_TSNS
4323 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4324 stcb->asoc.cumack_log_at++;
4325 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4326 stcb->asoc.cumack_log_at = 0;
4331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4332 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4333 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4335 old_rwnd = stcb->asoc.peers_rwnd;
4336 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4337 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4338 stcb->asoc.overall_error_count,
4340 SCTP_FROM_SCTP_INDATA,
4343 stcb->asoc.overall_error_count = 0;
4345 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4346 sctp_log_sack(asoc->last_acked_seq,
4353 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4355 uint32_t *dupdata, dblock;
4357 for (i = 0; i < num_dup; i++) {
4358 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4359 sizeof(uint32_t), (uint8_t *) & dblock);
4360 if (dupdata == NULL) {
4363 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4367 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4368 tp1 = TAILQ_LAST(&asoc->sent_queue,
4369 sctpchunk_listhead);
4370 send_s = tp1->rec.data.TSN_seq + 1;
4373 send_s = asoc->sending_seq;
4375 if (SCTP_TSN_GE(cum_ack, send_s)) {
4376 struct mbuf *op_err;
4377 char msg[SCTP_DIAG_INFO_LEN];
4380 * no way, we have not even sent this TSN out yet. Peer is
4381 * hopelessly messed up with us.
4383 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4386 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4387 tp1->rec.data.TSN_seq, (void *)tp1);
4392 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4394 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4395 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4396 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4399 /**********************/
4400 /* 1) check the range */
4401 /**********************/
4402 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4403 /* acking something behind */
4406 /* update the Rwnd of the peer */
4407 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4408 TAILQ_EMPTY(&asoc->send_queue) &&
4409 (asoc->stream_queue_cnt == 0)) {
4410 /* nothing left on send/sent and strmq */
4411 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4412 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4413 asoc->peers_rwnd, 0, 0, a_rwnd);
4415 asoc->peers_rwnd = a_rwnd;
4416 if (asoc->sent_queue_retran_cnt) {
4417 asoc->sent_queue_retran_cnt = 0;
4419 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4420 /* SWS sender side engages */
4421 asoc->peers_rwnd = 0;
4423 /* stop any timers */
4424 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4425 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4426 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4427 net->partial_bytes_acked = 0;
4428 net->flight_size = 0;
4430 asoc->total_flight = 0;
4431 asoc->total_flight_count = 0;
4435 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4436 * things. The total byte count acked is tracked in netAckSz AND
4437 * netAck2 is used to track the total bytes acked that are un-
4438 * amibguious and were never retransmitted. We track these on a per
4439 * destination address basis.
4441 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4442 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4443 /* Drag along the window_tsn for cwr's */
4444 net->cwr_window_tsn = cum_ack;
4446 net->prev_cwnd = net->cwnd;
4451 * CMT: Reset CUC and Fast recovery algo variables before
4454 net->new_pseudo_cumack = 0;
4455 net->will_exit_fast_recovery = 0;
4456 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4457 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4460 /* process the new consecutive TSN first */
4461 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4462 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4463 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4465 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4467 * If it is less than ACKED, it is
4468 * now no-longer in flight. Higher
4469 * values may occur during marking
4471 if ((tp1->whoTo->dest_state &
4472 SCTP_ADDR_UNCONFIRMED) &&
4473 (tp1->snd_count < 2)) {
4475 * If there was no retran
4476 * and the address is
4477 * un-confirmed and we sent
4479 * sacked.. its confirmed,
4482 tp1->whoTo->dest_state &=
4483 ~SCTP_ADDR_UNCONFIRMED;
4485 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4486 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4487 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4488 tp1->whoTo->flight_size,
4490 (uint32_t) (uintptr_t) tp1->whoTo,
4491 tp1->rec.data.TSN_seq);
4493 sctp_flight_size_decrease(tp1);
4494 sctp_total_flight_decrease(stcb, tp1);
4495 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4496 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4500 tp1->whoTo->net_ack += tp1->send_size;
4502 /* CMT SFR and DAC algos */
4503 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4504 tp1->whoTo->saw_newack = 1;
4506 if (tp1->snd_count < 2) {
4508 * True non-retransmited
4511 tp1->whoTo->net_ack2 +=
4514 /* update RTO too? */
4518 sctp_calculate_rto(stcb,
4520 &tp1->sent_rcv_time,
4521 sctp_align_safe_nocopy,
4522 SCTP_RTT_FROM_DATA);
4525 if (tp1->whoTo->rto_needed == 0) {
4526 tp1->whoTo->rto_needed = 1;
4532 * CMT: CUCv2 algorithm. From the
4533 * cumack'd TSNs, for each TSN being
4534 * acked for the first time, set the
4535 * following variables for the
4536 * corresp destination.
4537 * new_pseudo_cumack will trigger a
4539 * find_(rtx_)pseudo_cumack will
4540 * trigger search for the next
4541 * expected (rtx-)pseudo-cumack.
4543 tp1->whoTo->new_pseudo_cumack = 1;
4544 tp1->whoTo->find_pseudo_cumack = 1;
4545 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4548 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4549 sctp_log_sack(asoc->last_acked_seq,
4551 tp1->rec.data.TSN_seq,
4554 SCTP_LOG_TSN_ACKED);
4556 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4557 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4560 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4561 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4562 #ifdef SCTP_AUDITING_ENABLED
4563 sctp_audit_log(0xB3,
4564 (asoc->sent_queue_retran_cnt & 0x000000ff));
4567 if (tp1->rec.data.chunk_was_revoked) {
4568 /* deflate the cwnd */
4569 tp1->whoTo->cwnd -= tp1->book_size;
4570 tp1->rec.data.chunk_was_revoked = 0;
4572 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4573 tp1->sent = SCTP_DATAGRAM_ACKED;
4580 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4581 /* always set this up to cum-ack */
4582 asoc->this_sack_highest_gap = last_tsn;
4584 if ((num_seg > 0) || (num_nr_seg > 0)) {
4587 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4588 * to be greater than the cumack. Also reset saw_newack to 0
4591 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4592 net->saw_newack = 0;
4593 net->this_sack_highest_newack = last_tsn;
4597 * thisSackHighestGap will increase while handling NEW
4598 * segments this_sack_highest_newack will increase while
4599 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4600 * used for CMT DAC algo. saw_newack will also change.
4602 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4603 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4604 num_seg, num_nr_seg, &rto_ok)) {
4608 * validate the biggest_tsn_acked in the gap acks if strict
4609 * adherence is wanted.
4611 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4613 * peer is either confused or we are under attack.
4616 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4617 biggest_tsn_acked, send_s);
4621 /*******************************************/
4622 /* cancel ALL T3-send timer if accum moved */
4623 /*******************************************/
4624 if (asoc->sctp_cmt_on_off > 0) {
4625 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4626 if (net->new_pseudo_cumack)
4627 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4629 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4634 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4635 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4636 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4640 /********************************************/
4641 /* drop the acked chunks from the sentqueue */
4642 /********************************************/
4643 asoc->last_acked_seq = cum_ack;
4645 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4646 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4649 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4650 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4651 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4654 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4658 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4659 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4660 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4661 asoc->trigger_reset = 1;
4663 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4664 if (PR_SCTP_ENABLED(tp1->flags)) {
4665 if (asoc->pr_sctp_cnt != 0)
4666 asoc->pr_sctp_cnt--;
4668 asoc->sent_queue_cnt--;
4670 /* sa_ignore NO_NULL_CHK */
4671 sctp_free_bufspace(stcb, asoc, tp1, 1);
4672 sctp_m_freem(tp1->data);
4674 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4675 asoc->sent_queue_cnt_removeable--;
4678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4679 sctp_log_sack(asoc->last_acked_seq,
4681 tp1->rec.data.TSN_seq,
4684 SCTP_LOG_FREE_SENT);
4686 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4689 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4691 panic("Warning flight size is positive and should be 0");
4693 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4694 asoc->total_flight);
4696 asoc->total_flight = 0;
4698 /* sa_ignore NO_NULL_CHK */
4699 if ((wake_him) && (stcb->sctp_socket)) {
4700 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4704 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4705 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4706 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4708 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4709 so = SCTP_INP_SO(stcb->sctp_ep);
4710 atomic_add_int(&stcb->asoc.refcnt, 1);
4711 SCTP_TCB_UNLOCK(stcb);
4712 SCTP_SOCKET_LOCK(so, 1);
4713 SCTP_TCB_LOCK(stcb);
4714 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4715 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4716 /* assoc was freed while we were unlocked */
4717 SCTP_SOCKET_UNLOCK(so, 1);
4721 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4722 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4723 SCTP_SOCKET_UNLOCK(so, 1);
4726 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4727 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4731 if (asoc->fast_retran_loss_recovery && accum_moved) {
4732 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4733 /* Setup so we will exit RFC2582 fast recovery */
4734 will_exit_fast_recovery = 1;
4738 * Check for revoked fragments:
4740 * if Previous sack - Had no frags then we can't have any revoked if
4741 * Previous sack - Had frag's then - If we now have frags aka
4742 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4743 * some of them. else - The peer revoked all ACKED fragments, since
4744 * we had some before and now we have NONE.
4748 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4749 asoc->saw_sack_with_frags = 1;
4750 } else if (asoc->saw_sack_with_frags) {
4751 int cnt_revoked = 0;
4753 /* Peer revoked all dg's marked or acked */
4754 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4755 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4756 tp1->sent = SCTP_DATAGRAM_SENT;
4757 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4758 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4759 tp1->whoTo->flight_size,
4761 (uint32_t) (uintptr_t) tp1->whoTo,
4762 tp1->rec.data.TSN_seq);
4764 sctp_flight_size_increase(tp1);
4765 sctp_total_flight_increase(stcb, tp1);
4766 tp1->rec.data.chunk_was_revoked = 1;
4768 * To ensure that this increase in
4769 * flightsize, which is artificial, does not
4770 * throttle the sender, we also increase the
4771 * cwnd artificially.
4773 tp1->whoTo->cwnd += tp1->book_size;
4780 asoc->saw_sack_with_frags = 0;
4783 asoc->saw_sack_with_nr_frags = 1;
4785 asoc->saw_sack_with_nr_frags = 0;
4787 /* JRS - Use the congestion control given in the CC module */
4788 if (ecne_seen == 0) {
4789 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4790 if (net->net_ack2 > 0) {
4792 * Karn's rule applies to clearing error
4793 * count, this is optional.
4795 net->error_count = 0;
4796 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4797 /* addr came good */
4798 net->dest_state |= SCTP_ADDR_REACHABLE;
4799 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4800 0, (void *)net, SCTP_SO_NOT_LOCKED);
4802 if (net == stcb->asoc.primary_destination) {
4803 if (stcb->asoc.alternate) {
4805 * release the alternate,
4808 sctp_free_remote_addr(stcb->asoc.alternate);
4809 stcb->asoc.alternate = NULL;
4812 if (net->dest_state & SCTP_ADDR_PF) {
4813 net->dest_state &= ~SCTP_ADDR_PF;
4814 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4815 stcb->sctp_ep, stcb, net,
4816 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4817 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4818 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4819 /* Done with this net */
4822 /* restore any doubled timers */
4823 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4824 if (net->RTO < stcb->asoc.minrto) {
4825 net->RTO = stcb->asoc.minrto;
4827 if (net->RTO > stcb->asoc.maxrto) {
4828 net->RTO = stcb->asoc.maxrto;
4832 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4834 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4835 /* nothing left in-flight */
4836 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4837 /* stop all timers */
4838 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4840 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4841 net->flight_size = 0;
4842 net->partial_bytes_acked = 0;
4844 asoc->total_flight = 0;
4845 asoc->total_flight_count = 0;
4847 /**********************************/
4848 /* Now what about shutdown issues */
4849 /**********************************/
4850 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4851 /* nothing left on sendqueue.. consider done */
4852 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4853 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4854 asoc->peers_rwnd, 0, 0, a_rwnd);
4856 asoc->peers_rwnd = a_rwnd;
4857 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4858 /* SWS sender side engages */
4859 asoc->peers_rwnd = 0;
4862 if ((asoc->stream_queue_cnt == 1) &&
4863 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4864 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4865 (asoc->locked_on_sending)
4867 struct sctp_stream_queue_pending *sp;
4870 * I may be in a state where we got all across.. but
4871 * cannot write more due to a shutdown... we abort
4872 * since the user did not indicate EOR in this case.
4874 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4876 if ((sp) && (sp->length == 0)) {
4877 asoc->locked_on_sending = NULL;
4878 if (sp->msg_is_complete) {
4879 asoc->stream_queue_cnt--;
4881 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4882 asoc->stream_queue_cnt--;
4886 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4887 (asoc->stream_queue_cnt == 0)) {
4888 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4889 /* Need to abort here */
4890 struct mbuf *op_err;
4895 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4896 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4897 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4900 struct sctp_nets *netp;
4902 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4903 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4904 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4906 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4907 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4908 sctp_stop_timers_for_shutdown(stcb);
4909 if (asoc->alternate) {
4910 netp = asoc->alternate;
4912 netp = asoc->primary_destination;
4914 sctp_send_shutdown(stcb, netp);
4915 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4916 stcb->sctp_ep, stcb, netp);
4917 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4918 stcb->sctp_ep, stcb, netp);
4921 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4922 (asoc->stream_queue_cnt == 0)) {
4923 struct sctp_nets *netp;
4925 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4928 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4929 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4930 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4931 sctp_stop_timers_for_shutdown(stcb);
4932 if (asoc->alternate) {
4933 netp = asoc->alternate;
4935 netp = asoc->primary_destination;
4937 sctp_send_shutdown_ack(stcb, netp);
4938 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4939 stcb->sctp_ep, stcb, netp);
4944 * Now here we are going to recycle net_ack for a different use...
4947 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4952 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4953 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4954 * automatically ensure that.
4956 if ((asoc->sctp_cmt_on_off > 0) &&
4957 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4958 (cmt_dac_flag == 0)) {
4959 this_sack_lowest_newack = cum_ack;
4961 if ((num_seg > 0) || (num_nr_seg > 0)) {
4962 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4963 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4965 /* JRS - Use the congestion control given in the CC module */
4966 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4968 /* Now are we exiting loss recovery ? */
4969 if (will_exit_fast_recovery) {
4970 /* Ok, we must exit fast recovery */
4971 asoc->fast_retran_loss_recovery = 0;
4973 if ((asoc->sat_t3_loss_recovery) &&
4974 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4975 /* end satellite t3 loss recovery */
4976 asoc->sat_t3_loss_recovery = 0;
4981 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4982 if (net->will_exit_fast_recovery) {
4983 /* Ok, we must exit fast recovery */
4984 net->fast_retran_loss_recovery = 0;
4988 /* Adjust and set the new rwnd value */
4989 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4990 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4991 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4993 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4994 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4995 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4996 /* SWS sender side engages */
4997 asoc->peers_rwnd = 0;
4999 if (asoc->peers_rwnd > old_rwnd) {
5000 win_probe_recovery = 1;
5003 * Now we must setup so we have a timer up for anyone with
5009 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5010 if (win_probe_recovery && (net->window_probe)) {
5011 win_probe_recovered = 1;
5013 * Find first chunk that was used with
5014 * window probe and clear the event. Put
5015 * it back into the send queue as if has
5018 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5019 if (tp1->window_probe) {
5020 sctp_window_probe_recovery(stcb, asoc, tp1);
5025 if (net->flight_size) {
5027 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5028 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5029 stcb->sctp_ep, stcb, net);
5031 if (net->window_probe) {
5032 net->window_probe = 0;
5035 if (net->window_probe) {
5037 * In window probes we must assure a timer
5038 * is still running there
5040 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5041 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5042 stcb->sctp_ep, stcb, net);
5045 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5046 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5048 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5053 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5054 (asoc->sent_queue_retran_cnt == 0) &&
5055 (win_probe_recovered == 0) &&
5058 * huh, this should not happen unless all packets are
5059 * PR-SCTP and marked to skip of course.
5061 if (sctp_fs_audit(asoc)) {
5062 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5063 net->flight_size = 0;
5065 asoc->total_flight = 0;
5066 asoc->total_flight_count = 0;
5067 asoc->sent_queue_retran_cnt = 0;
5068 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5069 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5070 sctp_flight_size_increase(tp1);
5071 sctp_total_flight_increase(stcb, tp1);
5072 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5073 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5080 /*********************************************/
5081 /* Here we perform PR-SCTP procedures */
5083 /*********************************************/
5084 /* C1. update advancedPeerAckPoint */
5085 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5086 asoc->advanced_peer_ack_point = cum_ack;
5088 /* C2. try to further move advancedPeerAckPoint ahead */
5089 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5090 struct sctp_tmit_chunk *lchk;
5091 uint32_t old_adv_peer_ack_point;
5093 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5094 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5095 /* C3. See if we need to send a Fwd-TSN */
5096 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5098 * ISSUE with ECN, see FWD-TSN processing.
5100 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5101 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5102 0xee, cum_ack, asoc->advanced_peer_ack_point,
5103 old_adv_peer_ack_point);
5105 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5106 send_forward_tsn(stcb, asoc);
5108 /* try to FR fwd-tsn's that get lost too */
5109 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5110 send_forward_tsn(stcb, asoc);
5115 /* Assure a timer is up */
5116 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5117 stcb->sctp_ep, stcb, lchk->whoTo);
5120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5121 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5123 stcb->asoc.peers_rwnd,
5124 stcb->asoc.total_flight,
5125 stcb->asoc.total_output_queue_size);
/*
 * sctp_update_acked() - process the cumulative TSN ack carried in a
 * SHUTDOWN chunk.  A SHUTDOWN carries only a cum-ack (no gap reports and
 * no advertised window), so we synthesize an a_rwnd that leaves the
 * peer's receive window unchanged and hand the work to the express SACK
 * handler.
 *
 * stcb       - the TCB for the association being shut down.
 * cp         - the received SHUTDOWN chunk (network byte order).
 * abort_flag - out: set by the express handler if the association must
 *              be aborted (e.g. a bogus cum-ack).
 */
5130 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5133 uint32_t cum_ack, a_rwnd;
5135 cum_ack = ntohl(cp->cumulative_tsn_ack);
5136 /* Arrange so a_rwnd does NOT change */
/* peers_rwnd was computed as (advertised rwnd - total_flight); adding
 * total_flight back reconstructs the last advertised window, so the
 * express handler's rwnd math is a no-op for this SHUTDOWN. */
5137 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5139 /* Now call the express sack handling */
/* Final 0 argument: ecne_seen = 0 — a SHUTDOWN carries no ECN echo. */
5140 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * sctp_kick_prsctp_reorder_queue() - after a FORWARD-TSN has advanced
 * last_sequence_delivered on an in-stream, flush the stream's reorder
 * queue in two passes:
 *
 *   pass 1: deliver (or discard to the read queue) every queued message
 *           whose SSN/MID is at or before the new delivery point;
 *   pass 2: continue delivering any messages that are now exactly
 *           in order, advancing last_sequence_delivered as we go.
 *
 * Fragmented messages are not delivered directly; instead the delivery
 * point is backed up to just before them and sctp_deliver_reasm_check()
 * is invoked to let the reassembly code decide.
 *
 * Caller must hold the TCB lock and the INP read lock
 * (sctp_add_to_readq is called with SCTP_READ_LOCK_HELD).
 *
 * NOTE(review): several lines of this function (brace/else lines, the
 * assignments to `old` and `tt` under the idata check) are not visible
 * in this chunk; comments below describe only the visible logic.
 */
5144 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5145 struct sctp_stream_in *strmin)
5147 struct sctp_queued_to_read *ctl, *nctl;
5148 struct sctp_association *asoc;
5150 int need_reasm_check = 0, old;
/* tt = the highest SSN/MID already delivered on this stream. */
5153 tt = strmin->last_sequence_delivered;
/* `old` selects 16-bit SSN vs 32-bit MID comparison semantics below
 * depending on I-DATA support — assignment lines not visible here. */
5154 if (asoc->idata_supported) {
5160 * First deliver anything prior to and including the stream no that
/* ---- pass 1: everything at or before the delivery point ---- */
5163 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5164 if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5165 /* this is deliverable now */
5166 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Complete (unfragmented) message: unlink from whichever
 * stream queue it sits on, then push it to the socket. */
5167 if (ctl->on_strm_q) {
5168 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5169 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5170 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5171 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
/* Invariant violation: unknown queue tag. */
5174 panic("strmin: %p ctl: %p unknown %d",
5175 strmin, ctl, ctl->on_strm_q);
5180 /* subtract pending on streams */
5181 asoc->size_on_all_streams -= ctl->length;
5182 sctp_ucount_decr(asoc->cnt_on_all_streams);
5183 /* deliver it to at least the delivery-q */
5184 if (stcb->sctp_socket) {
/* TSN may not be renege-able once handed to the app. */
5185 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5186 sctp_add_to_readq(stcb->sctp_ep, stcb,
5188 &stcb->sctp_socket->so_rcv,
5189 1, SCTP_READ_LOCK_HELD,
5190 SCTP_SO_NOT_LOCKED);
5193 /* Its a fragmented message */
5194 if (ctl->first_frag_seen) {
5196 * Make it so this is next to
5197 * deliver, we restore later
/* Back the delivery point up to just before this
 * partial message so the reasm check targets it. */
5199 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5200 need_reasm_check = 1;
5205 /* no more delivery now. */
5209 if (need_reasm_check) {
/* Let the reassembly code try to deliver the partial message
 * we pointed last_sequence_delivered at above. */
5212 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5213 if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5214 /* Restore the next to deliver unless we are ahead */
5215 strmin->last_sequence_delivered = tt;
5218 /* Left the front Partial one on */
5221 need_reasm_check = 0;
5224 * now we must deliver things in queue the normal way if any are
/* ---- pass 2: deliver whatever is now exactly in order ---- */
5227 tt = strmin->last_sequence_delivered + 1;
5228 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5229 if (tt == ctl->sinfo_ssn) {
5230 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5231 /* this is deliverable now */
5232 if (ctl->on_strm_q) {
5233 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5234 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5235 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5236 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5239 panic("strmin: %p ctl: %p unknown %d",
5240 strmin, ctl, ctl->on_strm_q);
5245 /* subtract pending on streams */
5246 asoc->size_on_all_streams -= ctl->length;
5247 sctp_ucount_decr(asoc->cnt_on_all_streams);
5248 /* deliver it to at least the delivery-q */
/* Unlike pass 1, delivery here advances the stream's
 * delivery point so the next iteration can chain. */
5249 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5250 if (stcb->sctp_socket) {
5251 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5252 sctp_add_to_readq(stcb->sctp_ep, stcb,
5254 &stcb->sctp_socket->so_rcv, 1,
5255 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Recompute the next expected SSN/MID and keep scanning. */
5258 tt = strmin->last_sequence_delivered + 1;
5260 /* Its a fragmented message */
5261 if (ctl->first_frag_seen) {
5263 * Make it so this is next to
5266 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5267 need_reasm_check = 1;
5275 if (need_reasm_check) {
/* Result intentionally ignored: nothing left to restore here. */
5276 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
/*
 * sctp_flush_reassm_for_str_seq() - drop all reassembly state for one
 * (stream, sequence) pair that a FORWARD-TSN has told us to skip.
 *
 * Frees every chunk hanging on the message's reassembly list, adjusts
 * the reassembly-queue accounting, unlinks the control block from the
 * stream's in-queue, and releases it unless it has already been placed
 * on the socket read queue (in which case the read path owns it).
 *
 * stcb   - association TCB (must be locked by caller).
 * asoc   - the association (matches stcb->asoc).
 * stream - stream id whose reassembly entry is to be purged.
 * seq    - SSN/MID of the message to purge.
 */
5281 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5282 struct sctp_association *asoc,
5283 uint16_t stream, uint32_t seq)
5285 struct sctp_queued_to_read *control;
5286 struct sctp_stream_in *strm;
5287 struct sctp_tmit_chunk *chk, *nchk;
5290 * For now large messages held on the stream reasm that are complete
5291 * will be tossed too. We could in theory do more work to spin
5292 * through and stop after dumping one msg aka seeing the start of a
5293 * new msg at the head, and call the delivery function... to see if
5294 * it can be delivered... But for now we just dump everything on the
5297 strm = &asoc->strmin[stream];
/* NOTE(review): caller is expected to have validated stream <
 * asoc->streamincnt (the visible caller does) — no bound check here. */
5298 control = find_reasm_entry(strm, (uint32_t) seq, 0, 0);
5299 if (control == NULL) {
/* Purge every partial chunk queued for this message. */
5303 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5304 /* Purge hanging chunks */
5305 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5306 asoc->size_on_reasm_queue -= chk->send_size;
5307 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5309 sctp_m_freem(chk->data);
5312 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Detach the control block from the stream's ordered in-queue. */
5314 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5315 if (control->on_read_q == 0) {
/* Not handed to the socket yet, so we own it: release the
 * source-address ref, any partial data, and the readq entry. */
5316 sctp_free_remote_addr(control->whoFrom);
5317 if (control->data) {
5318 sctp_m_freem(control->data);
5319 control->data = NULL;
5321 sctp_free_a_readq(stcb, control);
5327 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5328 struct sctp_forward_tsn_chunk *fwd,
5329 int *abort_flag, struct mbuf *m, int offset)
5331 /* The pr-sctp fwd tsn */
5333 * here we will perform all the data receiver side steps for
5334 * processing FwdTSN, as required in by pr-sctp draft:
5336 * Assume we get FwdTSN(x):
5338 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5339 * others we have 3) examine and update re-ordering queue on
5340 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5341 * report where we are.
5343 struct sctp_association *asoc;
5344 uint32_t new_cum_tsn, gap;
5345 unsigned int i, fwd_sz, m_size;
5347 struct sctp_stream_in *strm;
5348 struct sctp_queued_to_read *ctl, *sv;
5351 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5352 SCTPDBG(SCTP_DEBUG_INDATA1,
5353 "Bad size too small/big fwd-tsn\n");
5356 m_size = (stcb->asoc.mapping_array_size << 3);
5357 /*************************************************************/
5358 /* 1. Here we update local cumTSN and shift the bitmap array */
5359 /*************************************************************/
5360 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5362 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5363 /* Already got there ... */
5367 * now we know the new TSN is more advanced, let's find the actual
5370 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5371 asoc->cumulative_tsn = new_cum_tsn;
5372 if (gap >= m_size) {
5373 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5374 struct mbuf *op_err;
5375 char msg[SCTP_DIAG_INFO_LEN];
5378 * out of range (of single byte chunks in the rwnd I
5379 * give out). This must be an attacker.
5382 snprintf(msg, sizeof(msg),
5383 "New cum ack %8.8x too high, highest TSN %8.8x",
5384 new_cum_tsn, asoc->highest_tsn_inside_map);
5385 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5386 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5387 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5390 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5392 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5393 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5394 asoc->highest_tsn_inside_map = new_cum_tsn;
5396 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5397 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5400 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5403 SCTP_TCB_LOCK_ASSERT(stcb);
5404 for (i = 0; i <= gap; i++) {
5405 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5406 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5407 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5408 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5409 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5414 /*************************************************************/
5415 /* 2. Clear up re-assembly queue */
5416 /*************************************************************/
5418 /* This is now done as part of clearing up the stream/seq */
5420 /*******************************************************/
5421 /* 3. Update the PR-stream re-ordering queues and fix */
5422 /* delivery issues as needed. */
5423 /*******************************************************/
5424 fwd_sz -= sizeof(*fwd);
5427 unsigned int num_str;
5431 struct sctp_strseq *stseq, strseqbuf;
5432 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5434 offset += sizeof(*fwd);
5436 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5437 if (asoc->idata_supported) {
5438 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5441 num_str = fwd_sz / sizeof(struct sctp_strseq);
5444 for (i = 0; i < num_str; i++) {
5445 if (asoc->idata_supported) {
5446 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5447 sizeof(struct sctp_strseq_mid),
5448 (uint8_t *) & strseqbuf_m);
5449 offset += sizeof(struct sctp_strseq_mid);
5450 if (stseq_m == NULL) {
5453 stream = ntohs(stseq_m->stream);
5454 sequence = ntohl(stseq_m->msg_id);
5456 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5457 sizeof(struct sctp_strseq),
5458 (uint8_t *) & strseqbuf);
5459 offset += sizeof(struct sctp_strseq);
5460 if (stseq == NULL) {
5463 stream = ntohs(stseq->stream);
5464 sequence = (uint32_t) ntohs(stseq->sequence);
5471 * Ok we now look for the stream/seq on the read
5472 * queue where its not all delivered. If we find it
5473 * we transmute the read entry into a PDI_ABORTED.
5475 if (stream >= asoc->streamincnt) {
5476 /* screwed up streams, stop! */
5479 if ((asoc->str_of_pdapi == stream) &&
5480 (asoc->ssn_of_pdapi == sequence)) {
5482 * If this is the one we were partially
5483 * delivering now then we no longer are.
5484 * Note this will change with the reassembly
5487 asoc->fragmented_delivery_inprogress = 0;
5489 strm = &asoc->strmin[stream];
5490 sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence);
5491 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5492 if ((ctl->sinfo_stream == stream) &&
5493 (ctl->sinfo_ssn == sequence)) {
5494 str_seq = (stream << 16) | (0x0000ffff & sequence);
5495 ctl->pdapi_aborted = 1;
5496 sv = stcb->asoc.control_pdapi;
5498 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5499 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5500 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5501 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5503 } else if (ctl->on_strm_q) {
5504 panic("strm: %p ctl: %p unknown %d",
5505 strm, ctl, ctl->on_strm_q);
5509 stcb->asoc.control_pdapi = ctl;
5510 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5512 SCTP_PARTIAL_DELIVERY_ABORTED,
5514 SCTP_SO_NOT_LOCKED);
5515 stcb->asoc.control_pdapi = sv;
5517 } else if ((ctl->sinfo_stream == stream) &&
5518 SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5519 /* We are past our victim SSN */
5523 if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5524 /* Update the sequence number */
5525 strm->last_sequence_delivered = sequence;
5527 /* now kick the stream the new way */
5528 /* sa_ignore NO_NULL_CHK */
5529 sctp_kick_prsctp_reorder_queue(stcb, strm);
5531 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5534 * Now slide thing forward.
5536 sctp_slide_mapping_arrays(stcb);