2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
54 * NOTES: On the outbound side of things I need to check the sack timer to
55 * see if I should generate a sack into the chunk queue (if I have data to
56 * send, that is) and will be sending it ... for bundling.
58 * The callback in sctp_usrreq.c will get called when the socket is read from.
59 * This will cause sctp_service_queues() to get called on the top entry in
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64 struct sctp_stream_in *strm,
65 struct sctp_tcb *stcb,
66 struct sctp_association *asoc,
67 struct sctp_tmit_chunk *chk, int lock_held);
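/*
 * Refresh the receive window advertised to the peer: recompute it from the
 * current socket-buffer and queue state and cache it in asoc->my_rwnd.
 */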
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 /* Calculate what the rwnd would be */
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
83 * This is really set wrong with respect to a 1-2-m socket, since
84 * the sb_cc is the count that everyone has put up. When we re-write
85 * sctp_soreceive then we will fix this so that ONLY this
86 * association's data is taken into account.
88 if (stcb->sctp_socket == NULL) {
91 if (stcb->asoc.sb_cc == 0 &&
92 asoc->size_on_reasm_queue == 0 &&
93 asoc->size_on_all_streams == 0) {
94 /* Full rwnd granted */
95 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 /* get actual space */
99 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
101 * take out what has NOT been put on the socket queue and that we still hold
104 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 asoc->cnt_on_reasm_queue * MSIZE));
106 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 asoc->cnt_on_all_streams * MSIZE));
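/*
 * Each queued chunk is charged an extra MSIZE here to account (roughly) for
 * the mbuf header overhead it consumes in the kernel, not just its payload
 * bytes.
 */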
112 /* what is the overhead of all these rwnd's */
113 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
115 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 * even if it is 0. SWS is engaged
118 if (calc < stcb->asoc.my_rwnd_control_len) {
127 * Build out our readq entry based on the incoming packet.
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131 struct sctp_nets *net,
132 uint32_t tsn, uint32_t ppid,
133 uint32_t context, uint16_t stream_no,
134 uint32_t stream_seq, uint8_t flags,
137 struct sctp_queued_to_read *read_queue_e = NULL;
139 sctp_alloc_a_readq(stcb, read_queue_e);
140 if (read_queue_e == NULL) {
143 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 read_queue_e->sinfo_stream = stream_no;
145 read_queue_e->sinfo_ssn = stream_seq;
146 read_queue_e->sinfo_flags = (flags << 8);
147 read_queue_e->sinfo_ppid = ppid;
148 read_queue_e->sinfo_context = context;
149 read_queue_e->sinfo_tsn = tsn;
150 read_queue_e->sinfo_cumtsn = tsn;
151 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
152 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
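/*
 * 0xffffffff serves as the "no fragment included yet" marker; the first
 * fragment merged into this entry establishes the real top_fsn/fsn_included
 * values.
 */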
153 TAILQ_INIT(&read_queue_e->reasm);
154 read_queue_e->whoFrom = net;
155 atomic_add_int(&net->ref_count, 1);
156 read_queue_e->data = dm;
157 read_queue_e->stcb = stcb;
158 read_queue_e->port_from = stcb->rport;
160 return (read_queue_e);
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
166 struct sctp_extrcvinfo *seinfo;
167 struct sctp_sndrcvinfo *outinfo;
168 struct sctp_rcvinfo *rcvinfo;
169 struct sctp_nxtinfo *nxtinfo;
176 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 /* user does not want any ancillary data */
183 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
186 seinfo = (struct sctp_extrcvinfo *)sinfo;
187 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
190 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
194 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
197 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
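/*
 * The total ancillary length is now known, so a single mbuf is allocated
 * below and the individual cmsg headers are filled in back to back inside
 * it.
 */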
206 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 SCTP_BUF_LEN(ret) = 0;
213 /* We need a CMSG header followed by the struct */
214 cmh = mtod(ret, struct cmsghdr *);
216 * Make sure that there is no un-initialized padding between the
217 * cmsg header and cmsg data and after the cmsg data.
220 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 cmh->cmsg_level = IPPROTO_SCTP;
222 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 cmh->cmsg_type = SCTP_RCVINFO;
224 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 rcvinfo->rcv_context = sinfo->sinfo_context;
232 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
237 cmh->cmsg_level = IPPROTO_SCTP;
238 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 cmh->cmsg_type = SCTP_NXTINFO;
240 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 nxtinfo->nxt_flags = 0;
243 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 nxtinfo->nxt_flags |= SCTP_UNORDERED;
246 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 nxtinfo->nxt_flags |= SCTP_COMPLETE;
252 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
262 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 cmh->cmsg_type = SCTP_EXTRCV;
264 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 cmh->cmsg_type = SCTP_SNDRCV;
270 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
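/*
 * Once data has been handed up toward the application, its TSN is moved from
 * the renegable mapping array to the non-renegable (nr) one, so it will not
 * be renege'd (revoked) even if the stack later has to drain memory.
 */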
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
280 uint32_t gap, i, cumackp1;
282 int in_r = 0, in_nr = 0;
284 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 cumackp1 = asoc->cumulative_tsn + 1;
288 if (SCTP_TSN_GT(cumackp1, tsn)) {
290 * this tsn is behind the cum ack and thus we don't need to
291 * worry about it being moved from one to the other.
295 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 if ((in_r == 0) && (in_nr == 0)) {
300 panic("Things are really messed up now");
302 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 sctp_print_mapping_array(asoc);
307 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 asoc->highest_tsn_inside_nr_map = tsn;
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
324 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t bits, unordered;
338 bits = (control->sinfo_flags >> 8);
339 unordered = bits & SCTP_DATA_UNORDERED;
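/*
 * The upper byte of sinfo_flags carries the original DATA chunk flags (see
 * the "flags << 8" in sctp_build_readq_entry), so the unordered bit can be
 * recovered here to pick the right per-stream queue.
 */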
341 q = &strm->uno_inqueue;
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
344 /* Only one stream can be here in old style
348 TAILQ_INSERT_TAIL(q, control, next_instrm);
349 control->on_strm_q = SCTP_ON_UNORDERED;
355 if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
356 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
358 if (TAILQ_EMPTY(q)) {
360 TAILQ_INSERT_HEAD(q, control, next_instrm);
362 control->on_strm_q = SCTP_ON_UNORDERED;
364 control->on_strm_q = SCTP_ON_ORDERED;
368 TAILQ_FOREACH(at, q, next_instrm) {
369 if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
371 * one in queue is bigger than the new one,
372 * insert before this one
374 TAILQ_INSERT_BEFORE(at, control, next_instrm);
376 control->on_strm_q = SCTP_ON_UNORDERED;
378 control->on_strm_q = SCTP_ON_ORDERED;
381 } else if (at->msg_id == control->msg_id) {
383 * Gak, He sent me a duplicate msg id
384 * number?? return -1 to abort.
388 if (TAILQ_NEXT(at, next_instrm) == NULL) {
390 * We are at the end, insert it
393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
394 sctp_log_strm_del(control, at,
395 SCTP_STR_LOG_FROM_INSERT_TL);
397 TAILQ_INSERT_AFTER(q,
398 at, control, next_instrm);
400 control->on_strm_q = SCTP_ON_UNORDERED;
402 control->on_strm_q = SCTP_ON_ORDERED;
413 sctp_abort_in_reasm(struct sctp_tcb *stcb,
414 struct sctp_queued_to_read *control,
415 struct sctp_tmit_chunk *chk,
416 int *abort_flag, int opspot)
418 char msg[SCTP_DIAG_INFO_LEN];
421 if (stcb->asoc.idata_supported) {
422 snprintf(msg, sizeof(msg),
423 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
425 control->fsn_included,
426 chk->rec.data.TSN_seq,
427 chk->rec.data.stream_number,
428 chk->rec.data.fsn_num, chk->rec.data.stream_seq);
430 snprintf(msg, sizeof(msg),
431 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
433 control->fsn_included,
434 chk->rec.data.TSN_seq,
435 chk->rec.data.stream_number,
436 chk->rec.data.fsn_num,
437 (uint16_t) chk->rec.data.stream_seq);
439 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
440 sctp_m_freem(chk->data);
442 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
443 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
444 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
449 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
452 * The control could not be placed and must be cleaned.
454 struct sctp_tmit_chunk *chk, *nchk;
456 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
457 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
459 sctp_m_freem(chk->data);
461 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
463 sctp_free_a_readq(stcb, control);
467 * Queue the chunk either right into the socket buffer if it is the next one
468 * to go OR put it in the correct place in the delivery queue. If we do
469 * append to the so_buf, keep doing so until we are out of order as
470 * long as the control entries are non-fragmented.
473 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
474 struct sctp_stream_in *strm,
475 struct sctp_association *asoc,
476 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
479 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
480 * all the data in one stream this could happen quite rapidly. One
481 * could use the TSN to keep track of things, but this scheme breaks
482 * down in the other type of stream usage that could occur. Send a
483 * single msg to stream 0, send 4 billion messages to stream 1, now
484 * send a message to stream 0. You have a situation where the TSN
485 * has wrapped but not in the stream. Is this worth worrying about
486 * or should we just change our queue sort at the bottom to be by TSN?
489 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
490 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
491 * assignment this could happen... and I don't see how this would be
492 * a violation. So for now I am undecided and will leave the sort by
493 * SSN alone. Maybe a hybrid approach is the answer.
496 struct sctp_queued_to_read *at;
500 char msg[SCTP_DIAG_INFO_LEN];
502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
503 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
505 if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
506 /* The incoming sseq is behind where we last delivered? */
507 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
508 control->sinfo_ssn, strm->last_sequence_delivered);
511 * throw it in the stream so it gets cleaned up in
512 * association destruction
514 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
515 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
516 strm->last_sequence_delivered, control->sinfo_tsn,
517 control->sinfo_stream, control->sinfo_ssn);
518 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
519 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
520 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
525 if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
529 asoc->size_on_all_streams += control->length;
530 sctp_ucount_incr(asoc->cnt_on_all_streams);
531 nxt_todel = strm->last_sequence_delivered + 1;
532 if (nxt_todel == control->sinfo_ssn) {
533 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
536 so = SCTP_INP_SO(stcb->sctp_ep);
537 atomic_add_int(&stcb->asoc.refcnt, 1);
538 SCTP_TCB_UNLOCK(stcb);
539 SCTP_SOCKET_LOCK(so, 1);
541 atomic_subtract_int(&stcb->asoc.refcnt, 1);
542 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
543 SCTP_SOCKET_UNLOCK(so, 1);
547 /* can be delivered right away? */
548 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
549 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
551 /* EY it won't be queued if it could be delivered directly */
553 asoc->size_on_all_streams -= control->length;
554 sctp_ucount_decr(asoc->cnt_on_all_streams);
555 strm->last_sequence_delivered++;
556 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
557 sctp_add_to_readq(stcb->sctp_ep, stcb,
559 &stcb->sctp_socket->so_rcv, 1,
560 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
561 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
563 nxt_todel = strm->last_sequence_delivered + 1;
564 if ((nxt_todel == control->sinfo_ssn) &&
565 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
566 asoc->size_on_all_streams -= control->length;
567 sctp_ucount_decr(asoc->cnt_on_all_streams);
568 if (control->on_strm_q == SCTP_ON_ORDERED) {
569 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
572 panic("Huh control: %p is on_strm_q: %d",
573 control, control->on_strm_q);
576 control->on_strm_q = 0;
577 strm->last_sequence_delivered++;
579 * We ignore the return of deliver_data here
580 * since we always can hold the chunk on the
581 * d-queue. And we have a finite number that
582 * can be delivered from the strq.
584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
585 sctp_log_strm_del(control, NULL,
586 SCTP_STR_LOG_FROM_IMMED_DEL);
588 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
589 sctp_add_to_readq(stcb->sctp_ep, stcb,
591 &stcb->sctp_socket->so_rcv, 1,
592 SCTP_READ_LOCK_NOT_HELD,
595 } else if (nxt_todel == control->sinfo_ssn) {
600 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
601 SCTP_SOCKET_UNLOCK(so, 1);
606 * Ok, we did not deliver this guy, find the correct place
607 * to put it on the queue.
609 if (sctp_place_control_in_stream(strm, asoc, control)) {
610 snprintf(msg, sizeof(msg),
611 "Queue to str msg_id: %u duplicate",
613 sctp_clean_up_control(stcb, control);
614 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
615 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
616 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
624 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
626 struct mbuf *m, *prev = NULL;
627 struct sctp_tcb *stcb;
629 stcb = control->stcb;
630 control->held_length = 0;
634 if (SCTP_BUF_LEN(m) == 0) {
635 /* Skip mbufs with NO length */
638 control->data = sctp_m_free(m);
641 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
642 m = SCTP_BUF_NEXT(prev);
645 control->tail_mbuf = prev;
650 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
651 if (control->on_read_q) {
653 * On read queue so we must increment the SB stuff;
654 * we assume the caller holds any needed SB locks.
656 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
658 m = SCTP_BUF_NEXT(m);
661 control->tail_mbuf = prev;
666 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
668 struct mbuf *prev = NULL;
669 struct sctp_tcb *stcb;
671 stcb = control->stcb;
674 panic("Control broken");
679 if (control->tail_mbuf == NULL) {
682 sctp_setup_tail_pointer(control);
685 control->tail_mbuf->m_next = m;
687 if (SCTP_BUF_LEN(m) == 0) {
688 /* Skip mbufs with NO length */
691 control->tail_mbuf->m_next = sctp_m_free(m);
692 m = control->tail_mbuf->m_next;
694 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
695 m = SCTP_BUF_NEXT(prev);
698 control->tail_mbuf = prev;
703 if (control->on_read_q) {
705 * On read queue so we must increment the SB stuff;
706 * we assume the caller holds any needed SB locks.
708 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
710 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
711 m = SCTP_BUF_NEXT(m);
714 control->tail_mbuf = prev;
719 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
721 memset(nc, 0, sizeof(struct sctp_queued_to_read));
722 nc->sinfo_stream = control->sinfo_stream;
723 nc->sinfo_ssn = control->sinfo_ssn;
724 TAILQ_INIT(&nc->reasm);
725 nc->top_fsn = control->top_fsn;
726 nc->msg_id = control->msg_id;
727 nc->sinfo_flags = control->sinfo_flags;
728 nc->sinfo_ppid = control->sinfo_ppid;
729 nc->sinfo_context = control->sinfo_context;
730 nc->fsn_included = 0xffffffff;
731 nc->sinfo_tsn = control->sinfo_tsn;
732 nc->sinfo_cumtsn = control->sinfo_cumtsn;
733 nc->sinfo_assoc_id = control->sinfo_assoc_id;
734 nc->whoFrom = control->whoFrom;
735 atomic_add_int(&nc->whoFrom->ref_count, 1);
736 nc->stcb = control->stcb;
737 nc->port_from = control->port_from;
741 sctp_reset_a_control(struct sctp_queued_to_read *control,
742 struct sctp_inpcb *inp, uint32_t tsn)
744 control->fsn_included = tsn;
745 if (control->on_read_q) {
747 * We have to purge it from there, hopefully this will work
750 TAILQ_REMOVE(&inp->read_queue, control, next);
751 control->on_read_q = 0;
756 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
757 struct sctp_association *asoc,
758 struct sctp_stream_in *strm,
759 struct sctp_queued_to_read *control,
761 int inp_read_lock_held)
764 * Special handling for the old un-ordered data chunk. All the
765 * chunks/TSNs go to msg_id 0. So we have to do the old style
766 * watching to see if we have it all. If we return one, no other
767 * control entries on the un-ordered queue will be looked at. In
768 * theory there should be no other entries in reality, unless the
769 * guy is sending both unordered NDATA and unordered DATA...
771 struct sctp_tmit_chunk *chk, *lchk, *tchk;
773 struct sctp_queued_to_read *nc;
776 if (control->first_frag_seen == 0) {
777 /* Nothing we can do, we have not seen the first piece yet */
780 /* Collapse any we can */
783 fsn = control->fsn_included + 1;
784 /* Now what can we add? */
785 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
786 if (chk->rec.data.fsn_num == fsn) {
788 sctp_alloc_a_readq(stcb, nc);
792 memset(nc, 0, sizeof(struct sctp_queued_to_read));
793 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
794 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
798 if (control->end_added) {
800 if (!TAILQ_EMPTY(&control->reasm)) {
802 * Ok we have to move anything left
803 * on the control queue to a new control.
806 sctp_build_readq_entry_from_ctl(nc, control);
807 tchk = TAILQ_FIRST(&control->reasm);
808 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
809 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
810 asoc->size_on_reasm_queue -= tchk->send_size;
811 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
812 nc->first_frag_seen = 1;
813 nc->fsn_included = tchk->rec.data.fsn_num;
814 nc->data = tchk->data;
815 nc->sinfo_ppid = tchk->rec.data.payloadtype;
816 nc->sinfo_tsn = tchk->rec.data.TSN_seq;
817 sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
819 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
820 sctp_setup_tail_pointer(nc);
821 tchk = TAILQ_FIRST(&control->reasm);
823 /* Spin the rest onto the queue */
825 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
826 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
827 tchk = TAILQ_FIRST(&control->reasm);
829 /* Now lets add it to the queue
830 * after removing control */
831 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
832 nc->on_strm_q = SCTP_ON_UNORDERED;
833 if (control->on_strm_q) {
834 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
835 control->on_strm_q = 0;
838 if (control->pdapi_started) {
839 strm->pd_api_started = 0;
840 control->pdapi_started = 0;
842 if (control->on_strm_q) {
843 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
844 control->on_strm_q = 0;
845 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
847 if (control->on_read_q == 0) {
848 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
849 &stcb->sctp_socket->so_rcv, control->end_added,
850 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
852 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
853 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
854 /* Switch to the new guy and
859 if (nc->on_strm_q == 0) {
860 sctp_free_a_readq(stcb, nc);
865 sctp_free_a_readq(stcb, nc);
872 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
873 strm->pd_api_started = 1;
874 control->pdapi_started = 1;
875 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
876 &stcb->sctp_socket->so_rcv, control->end_added,
877 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
878 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
886 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
887 struct sctp_association *asoc,
888 struct sctp_queued_to_read *control,
889 struct sctp_tmit_chunk *chk,
892 struct sctp_tmit_chunk *at;
896 * Here we need to place the chunk into the control structure sorted
897 * in the correct order.
899 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
900 /* Its the very first one. */
901 SCTPDBG(SCTP_DEBUG_XXX,
902 "chunk is a first fsn: %u becomes fsn_included\n",
903 chk->rec.data.fsn_num);
904 if (control->first_frag_seen) {
906 * In old un-ordered we can reassemble multiple
907 * messages on one control. As long as the next
908 * FIRST is greater than the old first (TSN i.e. FSN
914 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
915 /* Easy way the start of a new guy beyond
919 if ((chk->rec.data.fsn_num == control->fsn_included) ||
920 (control->pdapi_started)) {
922 * Ok this should not happen, if it does we
923 * started the pd-api on the higher TSN
924 * (since the equals part is a TSN failure
927 * We are completely hosed in that case since
928 * I have no way to recover. This really
929 * will only happen if we can get more TSN's
930 * higher before the pd-api-point.
932 sctp_abort_in_reasm(stcb, control, chk,
934 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
939 * Ok we have two firsts and the one we just got is
940 * smaller than the one we previously placed.. yuck!
941 * We must swap them out.
944 tdata = control->data;
945 control->data = chk->data;
947 /* Save the lengths */
948 chk->send_size = control->length;
949 /* Recompute length of control and tail pointer */
950 sctp_setup_tail_pointer(control);
951 /* Fix the FSN included */
952 tmp = control->fsn_included;
953 control->fsn_included = chk->rec.data.fsn_num;
954 chk->rec.data.fsn_num = tmp;
955 /* Fix the TSN included */
956 tmp = control->sinfo_tsn;
957 control->sinfo_tsn = chk->rec.data.TSN_seq;
958 chk->rec.data.TSN_seq = tmp;
959 /* Fix the PPID included */
960 tmp = control->sinfo_ppid;
961 control->sinfo_ppid = chk->rec.data.payloadtype;
962 chk->rec.data.payloadtype = tmp;
963 /* Fix tail pointer */
966 control->first_frag_seen = 1;
967 control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
968 control->sinfo_tsn = chk->rec.data.TSN_seq;
969 control->sinfo_ppid = chk->rec.data.payloadtype;
970 control->data = chk->data;
971 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
973 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
974 sctp_setup_tail_pointer(control);
979 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
980 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
982 * This one in queue is bigger than the new one,
983 * insert the new one before at.
985 asoc->size_on_reasm_queue += chk->send_size;
986 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
988 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
990 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
992 * They sent a duplicate fsn number. This really
993 * should not happen since the FSN is a TSN and it
994 * should have been dropped earlier.
996 sctp_abort_in_reasm(stcb, control, chk,
998 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1002 if (inserted == 0) {
1003 /* Its at the end */
1004 asoc->size_on_reasm_queue += chk->send_size;
1005 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1006 control->top_fsn = chk->rec.data.fsn_num;
1007 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1012 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1013 struct sctp_stream_in *strm, int inp_read_lock_held)
1016 * Given a stream, strm, see if any of the SSN's on it that are
1017 * fragmented are ready to deliver. If so go ahead and place them on
1018 * the read queue. In so placing if we have hit the end, then we
1019 * need to remove them from the stream's queue.
1021 struct sctp_queued_to_read *control, *nctl = NULL;
1022 uint32_t next_to_del;
1026 if (stcb->sctp_socket) {
1027 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1028 stcb->sctp_ep->partial_delivery_point);
1030 pd_point = stcb->sctp_ep->partial_delivery_point;
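/*
 * pd_point is the partial-delivery threshold: once a still-incomplete
 * message holds at least this many bytes it may be pushed to the reader
 * early. It is the smaller of the endpoint's configured
 * partial_delivery_point and a fraction of the receive buffer limit.
 */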
1032 control = TAILQ_FIRST(&strm->uno_inqueue);
1035 (asoc->idata_supported == 0)) {
1036 /* Special handling needed for "old" data format */
1037 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1041 if (strm->pd_api_started) {
1042 /* Can't add more */
1046 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1047 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1048 nctl = TAILQ_NEXT(control, next_instrm);
1049 if (control->end_added) {
1050 /* We just put the last bit on */
1051 if (control->on_strm_q) {
1053 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1054 panic("Huh control: %p on_q: %d -- not unordered?",
1055 control, control->on_strm_q);
1058 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1059 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1060 control->on_strm_q = 0;
1062 if (control->on_read_q == 0) {
1063 sctp_add_to_readq(stcb->sctp_ep, stcb,
1065 &stcb->sctp_socket->so_rcv, control->end_added,
1066 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1069 /* Can we do a PD-API for this un-ordered guy? */
1070 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1071 strm->pd_api_started = 1;
1072 control->pdapi_started = 1;
1073 sctp_add_to_readq(stcb->sctp_ep, stcb,
1075 &stcb->sctp_socket->so_rcv, control->end_added,
1076 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1084 control = TAILQ_FIRST(&strm->inqueue);
1085 if (strm->pd_api_started) {
1086 /* Can't add more */
1089 if (control == NULL) {
1092 if (strm->last_sequence_delivered == control->sinfo_ssn) {
1094 * Ok the guy at the top was being partially delivered
1095 * and has completed, so we remove it. Note the pd_api flag was
1096 * taken off when the chunk was merged on in
1097 * sctp_queue_data_for_reasm below.
1099 nctl = TAILQ_NEXT(control, next_instrm);
1100 SCTPDBG(SCTP_DEBUG_XXX,
1101 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1102 control, control->end_added, control->sinfo_ssn,
1103 control->top_fsn, control->fsn_included,
1104 strm->last_sequence_delivered);
1105 if (control->end_added) {
1106 if (control->on_strm_q) {
1108 if (control->on_strm_q != SCTP_ON_ORDERED) {
1109 panic("Huh control: %p on_q: %d -- not ordered?",
1110 control, control->on_strm_q);
1113 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1114 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1115 control->on_strm_q = 0;
1117 if (strm->pd_api_started && control->pdapi_started) {
1118 control->pdapi_started = 0;
1119 strm->pd_api_started = 0;
1121 if (control->on_read_q == 0) {
1122 sctp_add_to_readq(stcb->sctp_ep, stcb,
1124 &stcb->sctp_socket->so_rcv, control->end_added,
1125 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1130 if (strm->pd_api_started) {
1131 /* Can't add more; we must have gotten an un-ordered chunk above
1132 * that is being partially delivered. */
1136 next_to_del = strm->last_sequence_delivered + 1;
1138 SCTPDBG(SCTP_DEBUG_XXX,
1139 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1140 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1142 nctl = TAILQ_NEXT(control, next_instrm);
1143 if ((control->sinfo_ssn == next_to_del) &&
1144 (control->first_frag_seen)) {
1147 /* Ok we can deliver it onto the stream. */
1148 if (control->end_added) {
1149 /* We are done with it afterwards */
1150 if (control->on_strm_q) {
1152 if (control->on_strm_q != SCTP_ON_ORDERED) {
1153 panic("Huh control: %p on_q: %d -- not ordered?",
1154 control, control->on_strm_q);
1157 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1158 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1159 control->on_strm_q = 0;
1163 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1164 /* A singleton now slipping through - mark
1165 * it non-revokable too */
1166 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1167 } else if (control->end_added == 0) {
1168 /* Check if we can defer adding until its
1170 if ((control->length < pd_point) || (strm->pd_api_started)) {
1171 /* Don't need it or cannot add more
1172 * (one being delivered that way) */
1176 done = (control->end_added) && (control->last_frag_seen);
1177 if (control->on_read_q == 0) {
1178 sctp_add_to_readq(stcb->sctp_ep, stcb,
1180 &stcb->sctp_socket->so_rcv, control->end_added,
1181 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1183 strm->last_sequence_delivered = next_to_del;
1188 /* We are now doing PD API */
1189 strm->pd_api_started = 1;
1190 control->pdapi_started = 1;
1200 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1201 struct sctp_stream_in *strm,
1202 struct sctp_tcb *stcb, struct sctp_association *asoc,
1203 struct sctp_tmit_chunk *chk, int hold_rlock)
1206 * Given a control and a chunk, merge the data from the chk onto the
1207 * control and free up the chunk resources.
1211 if (control->on_read_q && (hold_rlock == 0)) {
1213 * It's being pd-api'd so we must take some locks.
1215 SCTP_INP_READ_LOCK(stcb->sctp_ep);
1218 if (control->data == NULL) {
1219 control->data = chk->data;
1220 sctp_setup_tail_pointer(control);
1222 sctp_add_to_tail_pointer(control, chk->data);
1224 control->fsn_included = chk->rec.data.fsn_num;
1225 asoc->size_on_reasm_queue -= chk->send_size;
1226 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1227 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1229 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1230 control->first_frag_seen = 1;
1232 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1234 if ((control->on_strm_q) && (control->on_read_q)) {
1235 if (control->pdapi_started) {
1236 control->pdapi_started = 0;
1237 strm->pd_api_started = 0;
1239 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1241 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1242 control->on_strm_q = 0;
1243 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1245 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1246 control->on_strm_q = 0;
1248 } else if (control->on_strm_q) {
1249 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1250 control->on_strm_q);
1254 control->end_added = 1;
1255 control->last_frag_seen = 1;
1258 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1260 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1264 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1265 * queue, see if anything can be delivered. If so pull it off (or as much as
1266 * we can). If we run out of space then we must dump what we can and set the
1267 * appropriate flag to say we queued what we could.
1270 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1271 struct sctp_stream_in *strm,
1272 struct sctp_queued_to_read *control,
1273 struct sctp_tmit_chunk *chk,
1274 int created_control,
1275 int *abort_flag, uint32_t tsn)
1278 struct sctp_tmit_chunk *at, *nat;
1279 int do_wakeup, unordered;
1282 * For old un-ordered data chunks.
1284 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1289 /* Must be added to the stream-in queue */
1290 if (created_control) {
1291 if (sctp_place_control_in_stream(strm, asoc, control)) {
1292 /* Duplicate SSN? */
1293 sctp_clean_up_control(stcb, control);
1294 sctp_abort_in_reasm(stcb, control, chk,
1296 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1299 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1301 * Ok we created this control and now let's validate
1302 * that it's legal, i.e. there is a B bit set; if not
1303 * and we have up to the cum-ack then it's invalid.
1305 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1306 sctp_abort_in_reasm(stcb, control, chk,
1308 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1313 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1314 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1318 * Ok we must queue the chunk into the reassembly portion: o if it's
1319 * the first it goes to the control mbuf. o if it's not first but the
1320 * next in sequence it goes to the control, and each succeeding one
1321 * in order also goes. o if it's not in order we place it on the list
1324 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1325 /* Its the very first one. */
1326 SCTPDBG(SCTP_DEBUG_XXX,
1327 "chunk is a first fsn: %u becomes fsn_included\n",
1328 chk->rec.data.fsn_num);
1329 if (control->first_frag_seen) {
1331 * Error on the sender's part: they either sent us two
1332 * data chunks with FIRST, or they sent two
1333 * un-ordered chunks that were fragmented at the
1334 * same time in the same stream.
1336 sctp_abort_in_reasm(stcb, control, chk,
1338 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1341 control->first_frag_seen = 1;
1342 control->fsn_included = chk->rec.data.fsn_num;
1343 control->data = chk->data;
1344 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1346 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1347 sctp_setup_tail_pointer(control);
1349 /* Place the chunk in our list */
1352 if (control->last_frag_seen == 0) {
1353 /* Still willing to raise highest FSN seen */
1354 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1355 SCTPDBG(SCTP_DEBUG_XXX,
1356 "We have a new top_fsn: %u\n",
1357 chk->rec.data.fsn_num);
1358 control->top_fsn = chk->rec.data.fsn_num;
1360 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1361 SCTPDBG(SCTP_DEBUG_XXX,
1362 "The last fsn is now in place fsn: %u\n",
1363 chk->rec.data.fsn_num);
1364 control->last_frag_seen = 1;
1366 if (asoc->idata_supported || control->first_frag_seen) {
1368 * For IDATA we always check since we know
1369 * that the first fragment is 0. For old
1370 * DATA we have to receive the first before
1371 * we know the first FSN (which is the TSN).
1373 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1374 /* We have already delivered up to
1375 * this so its a dup */
1376 sctp_abort_in_reasm(stcb, control, chk,
1378 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1383 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1384 /* Second last? huh? */
1385 SCTPDBG(SCTP_DEBUG_XXX,
1386 "Duplicate last fsn: %u (top: %u) -- abort\n",
1387 chk->rec.data.fsn_num, control->top_fsn);
1388 sctp_abort_in_reasm(stcb, control,
1390 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1393 if (asoc->idata_supported || control->first_frag_seen) {
1395 * For IDATA we always check since we know
1396 * that the first fragment is 0. For old
1397 * DATA we have to receive the first before
1398 * we know the first FSN (which is the TSN).
1401 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1402 /* We have already delivered up to
1403 * this so its a dup */
1404 SCTPDBG(SCTP_DEBUG_XXX,
1405 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1406 chk->rec.data.fsn_num, control->fsn_included);
1407 sctp_abort_in_reasm(stcb, control, chk,
1409 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1413 /* validate not beyond top FSN if we have seen last
1415 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1416 SCTPDBG(SCTP_DEBUG_XXX,
1417 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1418 chk->rec.data.fsn_num,
1420 sctp_abort_in_reasm(stcb, control, chk,
1422 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1427 * If we reach here, we need to place the new chunk in the
1428 * reassembly for this control.
1430 SCTPDBG(SCTP_DEBUG_XXX,
1431 "chunk is a not first fsn: %u needs to be inserted\n",
1432 chk->rec.data.fsn_num);
1433 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1434 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1436 * This one in queue is bigger than the new
1437 * one, insert the new one before at.
1439 SCTPDBG(SCTP_DEBUG_XXX,
1440 "Insert it before fsn: %u\n",
1441 at->rec.data.fsn_num);
1442 asoc->size_on_reasm_queue += chk->send_size;
1443 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1444 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1447 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1448 /* Gak, He sent me a duplicate str seq
1451 * foo bar, I guess I will just free this
1452 * new guy, should we abort too? FIX ME
1453 * MAYBE? Or it COULD be that the SSN's have
1454 * wrapped. Maybe I should compare to TSN
1455 * somehow... sigh for now just blow away
1458 SCTPDBG(SCTP_DEBUG_XXX,
1459 "Duplicate to fsn: %u -- abort\n",
1460 at->rec.data.fsn_num);
1461 sctp_abort_in_reasm(stcb, control,
1463 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1467 if (inserted == 0) {
1468 /* Goes on the end */
1469 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1470 chk->rec.data.fsn_num);
1471 asoc->size_on_reasm_queue += chk->send_size;
1472 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1473 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1477 * Ok lets see if we can suck any up into the control structure that
1478 * are in seq if it makes sense.
1482 * If the first fragment has not been seen there is no sense in
1485 if (control->first_frag_seen) {
1486 next_fsn = control->fsn_included + 1;
1487 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1488 if (at->rec.data.fsn_num == next_fsn) {
1489 /* We can add this one now to the control */
1490 SCTPDBG(SCTP_DEBUG_XXX,
1491 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1493 at->rec.data.fsn_num,
1494 next_fsn, control->fsn_included);
1495 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1496 sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1497 if (control->on_read_q) {
1501 if (control->end_added && control->pdapi_started) {
1502 if (strm->pd_api_started) {
1503 strm->pd_api_started = 0;
1504 control->pdapi_started = 0;
1506 if (control->on_read_q == 0) {
1507 sctp_add_to_readq(stcb->sctp_ep, stcb,
1509 &stcb->sctp_socket->so_rcv, control->end_added,
1510 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1521 /* Need to wakeup the reader */
1522 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1526 static struct sctp_queued_to_read *
1527 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1529 struct sctp_queued_to_read *control;
1532 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1533 if (control->msg_id == msg_id) {
1539 control = TAILQ_FIRST(&strm->uno_inqueue);
1542 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1543 if (control->msg_id == msg_id) {
1552 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1553 struct mbuf **m, int offset, int chk_length,
1554 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1555 int *break_flag, int last_chunk, uint8_t chtype)
1557 /* Process a data chunk */
1558 /* struct sctp_tmit_chunk *chk; */
1559 struct sctp_data_chunk *ch;
1560 struct sctp_idata_chunk *nch, chunk_buf;
1561 struct sctp_tmit_chunk *chk;
1562 uint32_t tsn, fsn, gap, msg_id;
1565 int need_reasm_check = 0;
1567 struct mbuf *op_err;
1568 char msg[SCTP_DIAG_INFO_LEN];
1569 struct sctp_queued_to_read *control = NULL;
1570 uint32_t protocol_id;
1571 uint8_t chunk_flags;
1572 struct sctp_stream_reset_list *liste;
1573 struct sctp_stream_in *strm;
1576 int created_control = 0;
1580 if (chtype == SCTP_IDATA) {
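/*
 * I-DATA header layout: the 32-bit ppid_fsn field is shared. The first
 * fragment of a message carries the PPID there (its FSN is implicitly 0);
 * middle and last fragments carry the FSN instead.
 */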
1581 nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1582 sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1583 ch = (struct sctp_data_chunk *)nch;
1584 clen = sizeof(struct sctp_idata_chunk);
1585 tsn = ntohl(ch->dp.tsn);
1586 msg_id = ntohl(nch->dp.msg_id);
1587 protocol_id = nch->dp.ppid_fsn.protocol_id;
1588 if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1591 fsn = ntohl(nch->dp.ppid_fsn.fsn);
1594 ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1595 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1596 tsn = ntohl(ch->dp.tsn);
1597 protocol_id = ch->dp.protocol_id;
1598 clen = sizeof(struct sctp_data_chunk);
1600 msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1604 chunk_flags = ch->ch.chunk_flags;
1605 if ((size_t)chk_length == clen) {
1607 * Need to send an abort since we had an empty data chunk.
1609 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1610 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1611 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1615 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1616 asoc->send_sack = 1;
1618 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1619 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1620 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1625 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1626 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1627 /* It is a duplicate */
1628 SCTP_STAT_INCR(sctps_recvdupdata);
1629 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1630 /* Record a dup for the next outbound sack */
1631 asoc->dup_tsns[asoc->numduptsns] = tsn;
1634 asoc->send_sack = 1;
1637 /* Calculate the number of TSN's between the base and this TSN */
1638 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
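/*
 * The mapping array keeps one bit per TSN starting at
 * mapping_array_base_tsn; "gap" is this TSN's bit offset, and the "<< 3"
 * below converts an array size in bytes to a size in bits.
 */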
1639 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1640 /* Can't hold the bit in the mapping array at its max size, toss it */
1643 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1644 SCTP_TCB_LOCK_ASSERT(stcb);
1645 if (sctp_expand_mapping_array(asoc, gap)) {
1646 /* Can't expand, drop it */
1650 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1653 /* See if we have received this one already */
1654 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1655 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1656 SCTP_STAT_INCR(sctps_recvdupdata);
1657 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1658 /* Record a dup for the next outbound sack */
1659 asoc->dup_tsns[asoc->numduptsns] = tsn;
1662 asoc->send_sack = 1;
1666 * Check to see about the GONE flag, duplicates would cause a sack
1667 * to be sent up above
1669 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1670 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1671 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1673 * wait a minute, this guy is gone, there is no longer a
1674 * receiver. Send peer an ABORT!
1676 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1677 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1682 * Now before going further we see if there is room. If NOT then we
1683 * MAY let one through only IF this TSN is the one we are waiting
1684 * for on a partial delivery API.
1687 /* Is the stream valid? */
1688 strmno = ntohs(ch->dp.stream_id);
1690 if (strmno >= asoc->streamincnt) {
1691 struct sctp_error_invalid_stream *cause;
1693 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1694 0, M_NOWAIT, 1, MT_DATA);
1695 if (op_err != NULL) {
1696 /* add some space up front so prepend will work well */
1697 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1698 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1700 * Error causes are just param's and this one has
1701 * two back to back phdr, one with the error type
1702 * and size, the other with the streamid and a rsvd
1704 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1705 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1706 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1707 cause->stream_id = ch->dp.stream_id;
1708 cause->reserved = htons(0);
1709 sctp_queue_op_err(stcb, op_err);
1711 SCTP_STAT_INCR(sctps_badsid);
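/*
 * Even with an invalid stream id the TSN itself is still accepted: it is
 * marked in the nr-mapping array and may advance the cumulative ack, but
 * the payload is never delivered to a stream.
 */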
1712 SCTP_TCB_LOCK_ASSERT(stcb);
1713 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1714 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1715 asoc->highest_tsn_inside_nr_map = tsn;
1717 if (tsn == (asoc->cumulative_tsn + 1)) {
1718 /* Update cum-ack */
1719 asoc->cumulative_tsn = tsn;
1723 strm = &asoc->strmin[strmno];
1725 * If it's a fragmented message, let's see if we can find the control
1726 * on the reassembly queues.
1728 if ((chtype == SCTP_IDATA) &&
1729 ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1732 * The first *must* be fsn 0, and other (middle/end) pieces
1733 * can *not* be fsn 0. XXX: This can happen in case of a
1734 * wrap around. Ignore it for now.
1736 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1737 msg_id, chunk_flags);
1740 control = sctp_find_reasm_entry(strm, msg_id, ordered, old_data);
1741 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1742 chunk_flags, control);
1743 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1744 /* See if we can find the re-assembly entity */
1745 if (control != NULL) {
1746 /* We found something, does it belong? */
1747 if (ordered && (msg_id != control->sinfo_ssn)) {
1748 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", msg_id);
1750 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1751 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1752 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1756 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1757 /* We can't have a switched order with an
1758 * unordered chunk */
1759 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1763 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1764 /* We can't have a switched unordered with a
1766 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1773 * It's a complete segment. Let's validate we don't have a
1774 * re-assembly going on with the same Stream/Seq (for
1775 * ordered) or in the same Stream for unordered.
1777 if (control != NULL) {
1778 if (ordered || (old_data == 0)) {
1779 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1780 chunk_flags, msg_id);
1781 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", msg_id);
1784 if ((tsn == control->fsn_included + 1) &&
1785 (control->end_added == 0)) {
1786 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1794 /* now do the tests */
1795 if (((asoc->cnt_on_all_streams +
1796 asoc->cnt_on_reasm_queue +
1797 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1798 (((int)asoc->my_rwnd) <= 0)) {
1800 * When we have NO room in the rwnd we check to make sure
1801 * the reader is doing its job...
1803 if (stcb->sctp_socket->so_rcv.sb_cc) {
1804 /* some to read, wake-up */
1805 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1808 so = SCTP_INP_SO(stcb->sctp_ep);
1809 atomic_add_int(&stcb->asoc.refcnt, 1);
1810 SCTP_TCB_UNLOCK(stcb);
1811 SCTP_SOCKET_LOCK(so, 1);
1812 SCTP_TCB_LOCK(stcb);
1813 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1814 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1815 /* assoc was freed while we were unlocked */
1816 SCTP_SOCKET_UNLOCK(so, 1);
1820 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1821 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1822 SCTP_SOCKET_UNLOCK(so, 1);
1825 /* now is it in the mapping array of what we have accepted? */
1827 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1828 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1829 /* Nope not in the valid range dump it */
1831 sctp_set_rwnd(stcb, asoc);
1832 if ((asoc->cnt_on_all_streams +
1833 asoc->cnt_on_reasm_queue +
1834 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1835 SCTP_STAT_INCR(sctps_datadropchklmt);
1837 SCTP_STAT_INCR(sctps_datadroprwnd);
1843 if (control == NULL) {
1846 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1851 #ifdef SCTP_ASOCLOG_OF_TSNS
1852 SCTP_TCB_LOCK_ASSERT(stcb);
1853 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1854 asoc->tsn_in_at = 0;
1855 asoc->tsn_in_wrapped = 1;
1857 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1858 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1859 asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1860 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1861 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1862 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1863 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1864 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1868 * Before we continue let's validate that we are not being fooled by
1869 * an evil attacker. We can only have Nk chunks based on our TSN
1870 * spread allowed by the mapping array N * 8 bits, so there is no
1871 * way our stream sequence numbers could have wrapped. We of course
1872 * only validate the FIRST fragment so the bit must be set.
1874 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1875 (TAILQ_EMPTY(&asoc->resetHead)) &&
1876 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1877 SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1878 /* The incoming sseq is behind where we last delivered? */
1879 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1880 msg_id, asoc->strmin[strmno].last_sequence_delivered);
1882 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1883 asoc->strmin[strmno].last_sequence_delivered,
1884 tsn, strmno, msg_id);
1885 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1886 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1887 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1891 /************************************
1892 * From here down we may find ch-> invalid
1893 * so it's a good idea NOT to use it.
1894 *************************************/
1896 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1898 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1900 if (last_chunk == 0) {
1902 dmbuf = SCTP_M_COPYM(*m,
1903 (offset + sizeof(struct sctp_idata_chunk)),
1906 dmbuf = SCTP_M_COPYM(*m,
1907 (offset + sizeof(struct sctp_data_chunk)),
1910 #ifdef SCTP_MBUF_LOGGING
1911 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1912 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1916 /* We can steal the last chunk */
1920 /* lop off the top part */
1922 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1924 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1926 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1927 l_len = SCTP_BUF_LEN(dmbuf);
1930 * need to count up the size hopefully does not hit
1936 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1937 l_len += SCTP_BUF_LEN(lat);
1940 if (l_len > the_len) {
1941 /* Trim the end round bytes off too */
1942 m_adj(dmbuf, -(l_len - the_len));
1945 if (dmbuf == NULL) {
1946 SCTP_STAT_INCR(sctps_nomem);
1950 * Now no matter what we need a control, get one if we don't have
1951 * one (we may have gotten it above when we found the message was
1954 if (control == NULL) {
1955 sctp_alloc_a_readq(stcb, control);
1956 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1961 if (control == NULL) {
1962 SCTP_STAT_INCR(sctps_nomem);
1965 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1966 control->data = dmbuf;
1967 control->tail_mbuf = NULL;
1968 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1969 control->top_fsn = control->fsn_included = fsn;
1971 created_control = 1;
1973 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1974 chunk_flags, ordered, msg_id, control);
1975 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1976 TAILQ_EMPTY(&asoc->resetHead) &&
1978 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1979 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1980 /* Candidate for express delivery */
1982 * It's not fragmented, no PD-API is up, nothing in the
1983 * delivery queue, it's un-ordered OR ordered and the next to
1984 * deliver AND nothing else is stuck on the stream queue,
1985 * and there is room for it in the socket buffer. Let's just
1986 * stuff it up the buffer....
1988 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1989 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1990 asoc->highest_tsn_inside_nr_map = tsn;
1992 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
1995 sctp_add_to_readq(stcb->sctp_ep, stcb,
1996 control, &stcb->sctp_socket->so_rcv,
1997 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1999 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2000 /* for ordered, bump what we delivered */
2001 strm->last_sequence_delivered++;
2003 SCTP_STAT_INCR(sctps_recvexpress);
2004 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2005 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
2006 SCTP_STR_LOG_FROM_EXPRS_DEL);
2009 goto finish_express_del;
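/*
 * Sketch of the express path (values are hypothetical): with
 * last_sequence_delivered == 7, an unfragmented ordered chunk with
 * msg_id 8 and an empty stream in-queue is handed straight to the socket
 * buffer here; msg_id 9 would instead have to wait on the stream
 * reordering queue below.
 */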
2011 /* Now will we need a chunk too? */
2012 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2013 sctp_alloc_a_chunk(stcb, chk);
2015 /* No memory so we drop the chunk */
2016 SCTP_STAT_INCR(sctps_nomem);
2017 if (last_chunk == 0) {
2018 /* we copied it, free the copy */
2019 sctp_m_freem(dmbuf);
2023 chk->rec.data.TSN_seq = tsn;
2024 chk->no_fr_allowed = 0;
2025 chk->rec.data.fsn_num = fsn;
2026 chk->rec.data.stream_seq = msg_id;
2027 chk->rec.data.stream_number = strmno;
2028 chk->rec.data.payloadtype = protocol_id;
2029 chk->rec.data.context = stcb->asoc.context;
2030 chk->rec.data.doing_fast_retransmit = 0;
2031 chk->rec.data.rcv_flags = chunk_flags;
2033 chk->send_size = the_len;
2035 SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (msg_id: %u)\n",
2038 atomic_add_int(&net->ref_count, 1);
2041 /* Set the appropriate TSN mark */
2042 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2043 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2044 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2045 asoc->highest_tsn_inside_nr_map = tsn;
2048 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2049 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2050 asoc->highest_tsn_inside_map = tsn;
2053 /* Now is it complete (i.e. not fragmented)? */
2054 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2056 * Special check for when streams are resetting. We could be
2057 * smarter about this and check the actual stream to see
2058 * if it is not being reset; that way we would not create a
2059 * HOLB when amongst streams being reset and those not being reset.
2063 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2064 SCTP_TSN_GT(tsn, liste->tsn)) {
2066 * yep, it's past where we need to reset... go ahead
2069 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2071 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2073 struct sctp_queued_to_read *ctlOn, *nctlOn;
2074 unsigned char inserted = 0;
2076 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2077 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2082 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2087 if (inserted == 0) {
2089 * not inserted in the loop above (its TSN sorts after
2090 * everything queued), so it must be appended at the end
2093 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2096 goto finish_express_del;
2098 if (chunk_flags & SCTP_DATA_UNORDERED) {
2099 /* queue directly into socket buffer */
2100 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2102 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2103 sctp_add_to_readq(stcb->sctp_ep, stcb,
2105 &stcb->sctp_socket->so_rcv, 1,
2106 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2109 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2111 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2119 goto finish_express_del;
2121 /* If we reach here it's a reassembly */
2122 need_reasm_check = 1;
2123 SCTPDBG(SCTP_DEBUG_XXX,
2124 "Queue data to stream for reasm control: %p msg_id: %u\n",
2126 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2129 * the assoc is now gone and chk was put onto the reasm
2130 * queue, which has all been freed.
2138 /* Here we tidy up things */
2139 if (tsn == (asoc->cumulative_tsn + 1)) {
2140 /* Update cum-ack */
2141 asoc->cumulative_tsn = tsn;
2147 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2149 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2151 SCTP_STAT_INCR(sctps_recvdata);
2152 /* Set it present please */
2153 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2154 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2156 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2157 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2158 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2160 /* check the special flag for stream resets */
2161 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2162 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2164 * we have finished working through the backlogged TSNs; now it is
2165 * time to reset streams. 1: call the reset function. 2: free the
2166 * pending_reply space. 3: distribute any chunks in the
2167 * pending_reply_queue.
2169 struct sctp_queued_to_read *ctl, *nctl;
2171 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2172 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2173 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2174 SCTP_FREE(liste, SCTP_M_STRESET);
2175 /* sa_ignore FREED_MEMORY */
2176 liste = TAILQ_FIRST(&asoc->resetHead);
2177 if (TAILQ_EMPTY(&asoc->resetHead)) {
2178 /* All can be removed */
2179 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2180 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2181 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2187 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2188 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2192 * if ctl->sinfo_tsn is <= liste->tsn we can
2193 * process it which is the NOT of
2194 * ctl->sinfo_tsn > liste->tsn
2196 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2197 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2204 * Now service re-assembly to pick up anything that has been
2205 * held on the reassembly queue.
2207 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2208 need_reasm_check = 0;
2210 if (need_reasm_check) {
2211 /* Another one waits ? */
2212 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2217 static const int8_t sctp_map_lookup_tab[256] = {
2218 0, 1, 0, 2, 0, 1, 0, 3,
2219 0, 1, 0, 2, 0, 1, 0, 4,
2220 0, 1, 0, 2, 0, 1, 0, 3,
2221 0, 1, 0, 2, 0, 1, 0, 5,
2222 0, 1, 0, 2, 0, 1, 0, 3,
2223 0, 1, 0, 2, 0, 1, 0, 4,
2224 0, 1, 0, 2, 0, 1, 0, 3,
2225 0, 1, 0, 2, 0, 1, 0, 6,
2226 0, 1, 0, 2, 0, 1, 0, 3,
2227 0, 1, 0, 2, 0, 1, 0, 4,
2228 0, 1, 0, 2, 0, 1, 0, 3,
2229 0, 1, 0, 2, 0, 1, 0, 5,
2230 0, 1, 0, 2, 0, 1, 0, 3,
2231 0, 1, 0, 2, 0, 1, 0, 4,
2232 0, 1, 0, 2, 0, 1, 0, 3,
2233 0, 1, 0, 2, 0, 1, 0, 7,
2234 0, 1, 0, 2, 0, 1, 0, 3,
2235 0, 1, 0, 2, 0, 1, 0, 4,
2236 0, 1, 0, 2, 0, 1, 0, 3,
2237 0, 1, 0, 2, 0, 1, 0, 5,
2238 0, 1, 0, 2, 0, 1, 0, 3,
2239 0, 1, 0, 2, 0, 1, 0, 4,
2240 0, 1, 0, 2, 0, 1, 0, 3,
2241 0, 1, 0, 2, 0, 1, 0, 6,
2242 0, 1, 0, 2, 0, 1, 0, 3,
2243 0, 1, 0, 2, 0, 1, 0, 4,
2244 0, 1, 0, 2, 0, 1, 0, 3,
2245 0, 1, 0, 2, 0, 1, 0, 5,
2246 0, 1, 0, 2, 0, 1, 0, 3,
2247 0, 1, 0, 2, 0, 1, 0, 4,
2248 0, 1, 0, 2, 0, 1, 0, 3,
2249 0, 1, 0, 2, 0, 1, 0, 8
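/*
 * The table above gives, for each possible byte value, the number of
 * consecutive 1-bits starting at bit 0 (i.e. the index of the lowest
 * clear bit). A minimal equivalent helper (hypothetical, not part of
 * this file), shown only for illustration:
 *
 *	static int
 *	trailing_ones(uint8_t val)
 *	{
 *		int n = 0;
 *
 *		while ((n < 8) && (val & (1 << n))) {
 *			n++;
 *		}
 *		return (n);
 *	}
 *
 * e.g. sctp_map_lookup_tab[0x0f] == trailing_ones(0x0f) == 4 and
 * sctp_map_lookup_tab[0xff] == 8.
 */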
2254 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2257 * Now we also need to check the mapping array in a couple of ways.
2258 * 1) Did we move the cum-ack point?
2260 * When you first glance at this you might think that all entries
2261 * that make up the position of the cum-ack would be in the
2262 * nr-mapping array only.. i.e. things up to the cum-ack are always
2263 * deliverable. That's true with one exception: when it's a fragmented
2264 * message we may not deliver the data until some threshold (or all
2265 * of it) is in place. So we must OR the nr_mapping_array and
2266 * mapping_array to get a true picture of the cum-ack.
2268 struct sctp_association *asoc;
2271 int slide_from, slide_end, lgap, distance;
2272 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2276 old_cumack = asoc->cumulative_tsn;
2277 old_base = asoc->mapping_array_base_tsn;
2278 old_highest = asoc->highest_tsn_inside_map;
2280 * We could probably improve this a small bit by calculating the
2281 * offset of the current cum-ack as the starting point.
2284 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2285 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2289 /* there is a 0 bit */
2290 at += sctp_map_lookup_tab[val];
2294 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
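/*
 * Worked example (hypothetical values): with mapping_array_base_tsn ==
 * 1000, a first OR'd byte of 0xff (at becomes 8) and a second byte of
 * 0x07 (at becomes 11), the loop stops and cumulative_tsn is set to
 * 1000 + (11 - 1) == 1010, i.e. TSNs 1000..1010 are all present in the
 * OR'd mapping arrays.
 */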
2296 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2297 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2299 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2300 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2302 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2303 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2304 sctp_print_mapping_array(asoc);
2305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2306 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2308 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2309 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2312 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2313 highest_tsn = asoc->highest_tsn_inside_nr_map;
2315 highest_tsn = asoc->highest_tsn_inside_map;
2317 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2318 /* The complete array was completed by a single FR */
2319 /* highest becomes the cum-ack */
2325 /* clear the array */
2326 clr = ((at + 7) >> 3);
2327 if (clr > asoc->mapping_array_size) {
2328 clr = asoc->mapping_array_size;
2330 memset(asoc->mapping_array, 0, clr);
2331 memset(asoc->nr_mapping_array, 0, clr);
2333 for (i = 0; i < asoc->mapping_array_size; i++) {
2334 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2335 SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2336 sctp_print_mapping_array(asoc);
2340 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2341 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2342 } else if (at >= 8) {
2343 /* we can slide the mapping array down */
2344 /* slide_from holds where we hit the first NON 0xff byte */
2347 * now calculate the ceiling of the move using our highest TSN value
2350 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2351 slide_end = (lgap >> 3);
2352 if (slide_end < slide_from) {
2353 sctp_print_mapping_array(asoc);
2355 panic("impossible slide");
2357 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2358 lgap, slide_end, slide_from, at);
2362 if (slide_end > asoc->mapping_array_size) {
2364 panic("would overrun buffer");
2366 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2367 asoc->mapping_array_size, slide_end);
2368 slide_end = asoc->mapping_array_size;
2371 distance = (slide_end - slide_from) + 1;
2372 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2373 sctp_log_map(old_base, old_cumack, old_highest,
2374 SCTP_MAP_PREPARE_SLIDE);
2375 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2376 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2378 if (distance + slide_from > asoc->mapping_array_size ||
2381 * Here we do NOT slide forward the array so that
2382 * hopefully when more data comes in to fill it up
2383 * we will be able to slide it forward. Really I
2384 * don't think this should happen :-0
2387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2389 (uint32_t) asoc->mapping_array_size,
2390 SCTP_MAP_SLIDE_NONE);
2395 for (ii = 0; ii < distance; ii++) {
2396 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2397 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2400 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2401 asoc->mapping_array[ii] = 0;
2402 asoc->nr_mapping_array[ii] = 0;
2404 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2405 asoc->highest_tsn_inside_map += (slide_from << 3);
2407 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2408 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2410 asoc->mapping_array_base_tsn += (slide_from << 3);
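/*
 * Worked example (hypothetical values): if the first non-0xff byte was
 * found at slide_from == 2, the arrays are shifted down by two bytes and
 * mapping_array_base_tsn advances by 2 * 8 == 16 TSNs, so the bit for
 * any given TSN keeps the same gap relative to the new base.
 */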
2411 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2412 sctp_log_map(asoc->mapping_array_base_tsn,
2413 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2414 SCTP_MAP_SLIDE_RESULT);
2421 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2423 struct sctp_association *asoc;
2424 uint32_t highest_tsn;
2427 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2428 highest_tsn = asoc->highest_tsn_inside_nr_map;
2430 highest_tsn = asoc->highest_tsn_inside_map;
2434 * Now we need to see if we need to queue a sack or just start the
2435 * timer (if allowed).
2437 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2439 * Ok, special case for the SHUTDOWN-SENT state: here we make
2440 * sure the SACK timer is off and instead send a SHUTDOWN and a SACK.
2443 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2444 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2445 stcb->sctp_ep, stcb, NULL,
2446 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2448 sctp_send_shutdown(stcb,
2449 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2450 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2454 /* is there a gap now ? */
2455 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2458 * CMT DAC algorithm: increase number of packets received since the last ack
2461 stcb->asoc.cmt_dac_pkts_rcvd++;
2463 if ((stcb->asoc.send_sack == 1) || /* We need to send a sack */
2465 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2467 (stcb->asoc.numduptsns) || /* we have dup's */
2468 (is_a_gap) || /* is still a gap */
2469 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2470 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ ) {
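/*
 * In short: a SACK is generated (or the SACK machinery is run) right
 * away when any of the conditions above holds; otherwise the code below
 * only arranges for the delayed-ack timer so the acknowledgement can be
 * bundled later. For example, with a SACK frequency of 2, every second
 * data packet trips the data_pkts_seen test even without gaps.
 */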
2472 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2473 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2474 (stcb->asoc.send_sack == 0) &&
2475 (stcb->asoc.numduptsns == 0) &&
2476 (stcb->asoc.delayed_ack) &&
2477 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2480 * CMT DAC algorithm: With CMT, delay acks
2481 * even in the face of
2483 * reordering. Therefore, acks that do
2484 * not have to be sent because of the above
2485 * reasons will be delayed. That is, acks
2486 * that would have been sent due to gap
2487 * reports will be delayed with DAC. Start
2488 * the delayed ack timer.
2490 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2491 stcb->sctp_ep, stcb, NULL);
2494 * Ok we must build a SACK since the timer
2495 * is pending, we got our first packet OR
2496 * there are gaps or duplicates.
2498 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2499 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2502 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2503 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2504 stcb->sctp_ep, stcb, NULL);
2511 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2512 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2513 struct sctp_nets *net, uint32_t * high_tsn)
2515 struct sctp_chunkhdr *ch, chunk_buf;
2516 struct sctp_association *asoc;
2517 int num_chunks = 0; /* number of control chunks processed */
2519 int chk_length, break_flag, last_chunk;
2520 int abort_flag = 0, was_a_gap;
2522 uint32_t highest_tsn;
2525 sctp_set_rwnd(stcb, &stcb->asoc);
2528 SCTP_TCB_LOCK_ASSERT(stcb);
2530 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2531 highest_tsn = asoc->highest_tsn_inside_nr_map;
2533 highest_tsn = asoc->highest_tsn_inside_map;
2535 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2537 * setup where we got the last DATA packet from for any SACK that
2538 * may need to go out. Don't bump the net. This is done ONLY when a
2539 * chunk is assigned.
2541 asoc->last_data_chunk_from = net;
2544 * Now before we proceed we must figure out if this is a wasted
2545 * cluster... i.e. it is a small packet sent in and yet the driver
2546 * underneath allocated a full cluster for it. If so we must copy it
2547 * to a smaller mbuf and free up the cluster mbuf. This will help
2548 * with cluster starvation. Note for __Panda__ we don't do this
2549 * since it has clusters all the way down to 64 bytes.
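 *
 * For example (sizes illustrative): a 60-byte DATA packet that arrived
 * in a 2 KB cluster can be copied into a plain mbuf here, letting the
 * much larger cluster be freed right away instead of sitting on the
 * receive path.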
2551 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2552 /* we only handle mbufs that are singletons.. not chains */
2553 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2555 /* ok lets see if we can copy the data up */
2558 /* get the pointers and copy */
2559 to = mtod(m, caddr_t *);
2560 from = mtod((*mm), caddr_t *);
2561 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2562 /* copy the length and free up the old */
2563 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2565 /* success, back copy */
2568 /* We are in trouble in the mbuf world .. yikes */
2572 /* get pointer to the first chunk header */
2573 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2574 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2579 * process all DATA chunks...
2581 *high_tsn = asoc->cumulative_tsn;
2583 asoc->data_pkts_seen++;
2584 while (stop_proc == 0) {
2585 /* validate chunk length */
2586 chk_length = ntohs(ch->chunk_length);
2587 if (length - *offset < chk_length) {
2588 /* all done, mutilated chunk */
2592 if ((asoc->idata_supported == 1) &&
2593 (ch->chunk_type == SCTP_DATA)) {
2594 struct mbuf *op_err;
2595 char msg[SCTP_DIAG_INFO_LEN];
2597 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2598 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2599 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2600 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2603 if ((asoc->idata_supported == 0) &&
2604 (ch->chunk_type == SCTP_IDATA)) {
2605 struct mbuf *op_err;
2606 char msg[SCTP_DIAG_INFO_LEN];
2608 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2609 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2610 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2611 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2614 if ((ch->chunk_type == SCTP_DATA) ||
2615 (ch->chunk_type == SCTP_IDATA)) {
2618 if (ch->chunk_type == SCTP_DATA) {
2619 clen = sizeof(struct sctp_data_chunk);
2621 clen = sizeof(struct sctp_idata_chunk);
2623 if (chk_length < clen) {
2625 * Need to send an abort since we had an
2626 * invalid data chunk.
2628 struct mbuf *op_err;
2629 char msg[SCTP_DIAG_INFO_LEN];
2631 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2633 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2634 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2635 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2638 #ifdef SCTP_AUDITING_ENABLED
2639 sctp_audit_log(0xB1, 0);
2641 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2646 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2647 chk_length, net, high_tsn, &abort_flag, &break_flag,
2648 last_chunk, ch->chunk_type)) {
2656 * Set because of out of rwnd space and no
2657 * drop rep space left.
2663 /* not a data chunk in the data region */
2664 switch (ch->chunk_type) {
2665 case SCTP_INITIATION:
2666 case SCTP_INITIATION_ACK:
2667 case SCTP_SELECTIVE_ACK:
2668 case SCTP_NR_SELECTIVE_ACK:
2669 case SCTP_HEARTBEAT_REQUEST:
2670 case SCTP_HEARTBEAT_ACK:
2671 case SCTP_ABORT_ASSOCIATION:
2673 case SCTP_SHUTDOWN_ACK:
2674 case SCTP_OPERATION_ERROR:
2675 case SCTP_COOKIE_ECHO:
2676 case SCTP_COOKIE_ACK:
2679 case SCTP_SHUTDOWN_COMPLETE:
2680 case SCTP_AUTHENTICATION:
2681 case SCTP_ASCONF_ACK:
2682 case SCTP_PACKET_DROPPED:
2683 case SCTP_STREAM_RESET:
2684 case SCTP_FORWARD_CUM_TSN:
2688 * Now, what do we do with KNOWN
2689 * chunks that are NOT in the right
2692 * For now, I do nothing but ignore
2693 * them. We may later want to add
2694 * sysctl stuff to switch out and do
2695 * either an ABORT() or possibly
2698 struct mbuf *op_err;
2699 char msg[SCTP_DIAG_INFO_LEN];
2701 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2703 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2704 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2708 /* unknown chunk type, use bit rules */
2709 if (ch->chunk_type & 0x40) {
2710 /* Add a error report to the queue */
2711 struct mbuf *op_err;
2712 struct sctp_gen_error_cause *cause;
2714 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2715 0, M_NOWAIT, 1, MT_DATA);
2716 if (op_err != NULL) {
2717 cause = mtod(op_err, struct sctp_gen_error_cause *);
2718 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2719 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2720 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2721 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2722 if (SCTP_BUF_NEXT(op_err) != NULL) {
2723 sctp_queue_op_err(stcb, op_err);
2725 sctp_m_freem(op_err);
2729 if ((ch->chunk_type & 0x80) == 0) {
2730 /* discard the rest of this packet */
2732 } /* else skip this bad chunk and
2733 * continue... */
break;
2734 } /* switch of chunk type */
2736 *offset += SCTP_SIZE32(chk_length);
2737 if ((*offset >= length) || stop_proc) {
2738 /* no more data left in the mbuf chain */
2742 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2743 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2752 * we need to report rwnd overrun drops.
2754 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2758 * Did we get data? If so, update the time for auto-close and
2759 * give the peer credit for being alive.
2761 SCTP_STAT_INCR(sctps_recvpktwithdata);
2762 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2763 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2764 stcb->asoc.overall_error_count,
2766 SCTP_FROM_SCTP_INDATA,
2769 stcb->asoc.overall_error_count = 0;
2770 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2772 /* now service all of the reassm queue if needed */
2773 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2774 /* Assure that we ack right away */
2775 stcb->asoc.send_sack = 1;
2777 /* Start a sack timer or QUEUE a SACK for sending */
2778 sctp_sack_check(stcb, was_a_gap);
2783 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2784 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2786 uint32_t * biggest_newly_acked_tsn,
2787 uint32_t * this_sack_lowest_newack,
2790 struct sctp_tmit_chunk *tp1;
2791 unsigned int theTSN;
2792 int j, wake_him = 0, circled = 0;
2794 /* Recover the tp1 we last saw */
2797 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2799 for (j = frag_strt; j <= frag_end; j++) {
2800 theTSN = j + last_tsn;
2802 if (tp1->rec.data.doing_fast_retransmit)
2806 * CMT: CUCv2 algorithm. For each TSN being
2807 * processed from the sent queue, track the
2808 * next expected pseudo-cumack, or
2809 * rtx_pseudo_cumack, if required. Separate
2810 * cumack trackers for first transmissions,
2811 * and retransmissions.
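 *
 * In other words, each destination tracks the lowest outstanding TSN
 * sent (or re-sent) to it as its own pseudo cum-ack; when that TSN is
 * acked, new_(rtx_)pseudo_cumack lets the CC module advance the cwnd for
 * that destination and the search for the next expected value restarts.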
2813 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2814 (tp1->whoTo->find_pseudo_cumack == 1) &&
2815 (tp1->snd_count == 1)) {
2816 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2817 tp1->whoTo->find_pseudo_cumack = 0;
2819 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2820 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2821 (tp1->snd_count > 1)) {
2822 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2823 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2825 if (tp1->rec.data.TSN_seq == theTSN) {
2826 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2828 * must be held until the cum-ack passes it
2831 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2833 * If it is less than RESEND, it is
2834 * now no longer in flight.
2835 * Higher values may already be set
2836 * via previous Gap Ack Blocks...
2837 * i.e. ACKED or RESEND.
2839 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2840 *biggest_newly_acked_tsn)) {
2841 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2844 * CMT: SFR algo (and HTNA) - set
2845 * saw_newack to 1 for dest being
2846 * newly acked. Update
2847 * this_sack_highest_newack if appropriate.
2850 if (tp1->rec.data.chunk_was_revoked == 0)
2851 tp1->whoTo->saw_newack = 1;
2853 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2854 tp1->whoTo->this_sack_highest_newack)) {
2855 tp1->whoTo->this_sack_highest_newack =
2856 tp1->rec.data.TSN_seq;
2859 * CMT DAC algo: also update
2860 * this_sack_lowest_newack
2862 if (*this_sack_lowest_newack == 0) {
2863 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2864 sctp_log_sack(*this_sack_lowest_newack,
2866 tp1->rec.data.TSN_seq,
2869 SCTP_LOG_TSN_ACKED);
2871 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2874 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2875 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2876 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2877 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2878 * Separate pseudo_cumack trackers for first transmissions and retransmissions.
2881 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2882 if (tp1->rec.data.chunk_was_revoked == 0) {
2883 tp1->whoTo->new_pseudo_cumack = 1;
2885 tp1->whoTo->find_pseudo_cumack = 1;
2887 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2888 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2890 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2891 if (tp1->rec.data.chunk_was_revoked == 0) {
2892 tp1->whoTo->new_pseudo_cumack = 1;
2894 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2896 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2897 sctp_log_sack(*biggest_newly_acked_tsn,
2899 tp1->rec.data.TSN_seq,
2902 SCTP_LOG_TSN_ACKED);
2904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2905 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2906 tp1->whoTo->flight_size,
2908 (uint32_t) (uintptr_t) tp1->whoTo,
2909 tp1->rec.data.TSN_seq);
2911 sctp_flight_size_decrease(tp1);
2912 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2913 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2916 sctp_total_flight_decrease(stcb, tp1);
2918 tp1->whoTo->net_ack += tp1->send_size;
2919 if (tp1->snd_count < 2) {
2921 * True non-retransmitted chunk
2923 tp1->whoTo->net_ack2 += tp1->send_size;
2931 sctp_calculate_rto(stcb,
2934 &tp1->sent_rcv_time,
2935 sctp_align_safe_nocopy,
2936 SCTP_RTT_FROM_DATA);
2939 if (tp1->whoTo->rto_needed == 0) {
2940 tp1->whoTo->rto_needed = 1;
2946 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2947 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2948 stcb->asoc.this_sack_highest_gap)) {
2949 stcb->asoc.this_sack_highest_gap =
2950 tp1->rec.data.TSN_seq;
2952 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2953 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2954 #ifdef SCTP_AUDITING_ENABLED
2955 sctp_audit_log(0xB2,
2956 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2961 * All chunks NOT UNSENT fall through here and are marked
2962 * (leave PR-SCTP ones that are to skip alone though)
2964 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2965 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2966 tp1->sent = SCTP_DATAGRAM_MARKED;
2968 if (tp1->rec.data.chunk_was_revoked) {
2969 /* deflate the cwnd */
2970 tp1->whoTo->cwnd -= tp1->book_size;
2971 tp1->rec.data.chunk_was_revoked = 0;
2973 /* NR Sack code here */
2975 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2976 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2977 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2980 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2983 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2984 (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2985 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2986 stcb->asoc.trigger_reset = 1;
2988 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2992 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2993 sctp_m_freem(tp1->data);
3000 } /* if (tp1->TSN_seq == theTSN) */
if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3003 tp1 = TAILQ_NEXT(tp1, sctp_next);
3004 if ((tp1 == NULL) && (circled == 0)) {
3006 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3008 } /* end while (tp1) */
3011 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3013 /* In case the fragments were not in order we must reset */
3014 } /* end for (j = frag_strt) */
3016 return (wake_him); /* Return value only used for nr-sack */
3021 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3022 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3023 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3024 int num_seg, int num_nr_seg, int *rto_ok)
3026 struct sctp_gap_ack_block *frag, block;
3027 struct sctp_tmit_chunk *tp1;
3032 uint16_t frag_strt, frag_end, prev_frag_end;
3034 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3038 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3041 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3043 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3044 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3045 *offset += sizeof(block);
3047 return (chunk_freed);
3049 frag_strt = ntohs(frag->start);
3050 frag_end = ntohs(frag->end);
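/*
 * Note: per RFC 4960, gap ack block offsets are relative to the
 * Cumulative TSN Ack of the SACK; e.g. with last_tsn == 1000, a block
 * with start 2 and end 4 reports TSNs 1002 through 1004 as received.
 */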
3052 if (frag_strt > frag_end) {
3053 /* This gap report is malformed, skip it. */
3056 if (frag_strt <= prev_frag_end) {
3057 /* This gap report is not in order, so restart. */
3058 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3060 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3061 *biggest_tsn_acked = last_tsn + frag_end;
3068 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3069 non_revocable, &num_frs, biggest_newly_acked_tsn,
3070 this_sack_lowest_newack, rto_ok)) {
3073 prev_frag_end = frag_end;
3075 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3077 sctp_log_fr(*biggest_tsn_acked,
3078 *biggest_newly_acked_tsn,
3079 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3081 return (chunk_freed);
3085 sctp_check_for_revoked(struct sctp_tcb *stcb,
3086 struct sctp_association *asoc, uint32_t cumack,
3087 uint32_t biggest_tsn_acked)
3089 struct sctp_tmit_chunk *tp1;
3091 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3092 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3094 * ok this guy is either ACK or MARKED. If it is
3095 * ACKED it has been previously acked but not this
3096 * time, i.e., revoked. If it is MARKED it was ACK'ed again.
3099 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3102 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3103 /* it has been revoked */
3104 tp1->sent = SCTP_DATAGRAM_SENT;
3105 tp1->rec.data.chunk_was_revoked = 1;
3107 * We must add this stuff back in to assure
3108 * timers and such get started.
3110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3111 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3112 tp1->whoTo->flight_size,
3114 (uint32_t) (uintptr_t) tp1->whoTo,
3115 tp1->rec.data.TSN_seq);
3117 sctp_flight_size_increase(tp1);
3118 sctp_total_flight_increase(stcb, tp1);
3120 * We inflate the cwnd to compensate for our
3121 * artificial inflation of the flight_size.
3123 tp1->whoTo->cwnd += tp1->book_size;
3124 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3125 sctp_log_sack(asoc->last_acked_seq,
3127 tp1->rec.data.TSN_seq,
3130 SCTP_LOG_TSN_REVOKED);
3132 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3133 /* it has been re-acked in this SACK */
3134 tp1->sent = SCTP_DATAGRAM_ACKED;
3137 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3144 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3145 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3147 struct sctp_tmit_chunk *tp1;
3148 int strike_flag = 0;
3150 int tot_retrans = 0;
3151 uint32_t sending_seq;
3152 struct sctp_nets *net;
3153 int num_dests_sacked = 0;
3156 * select the sending_seq, this is either the next thing ready to be
3157 * sent but not transmitted, OR, the next seq we assign.
3159 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3161 sending_seq = asoc->sending_seq;
3163 sending_seq = tp1->rec.data.TSN_seq;
3166 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3167 if ((asoc->sctp_cmt_on_off > 0) &&
3168 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3169 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3170 if (net->saw_newack)
3174 if (stcb->asoc.prsctp_supported) {
3175 (void)SCTP_GETTIME_TIMEVAL(&now);
3177 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3179 if (tp1->no_fr_allowed) {
3180 /* this one had a timeout or something */
3183 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3184 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3185 sctp_log_fr(biggest_tsn_newly_acked,
3186 tp1->rec.data.TSN_seq,
3188 SCTP_FR_LOG_CHECK_STRIKE);
3190 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3191 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3195 if (stcb->asoc.prsctp_supported) {
3196 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3197 /* Is it expired? */
3198 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3199 /* Yes so drop it */
3200 if (tp1->data != NULL) {
3201 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3202 SCTP_SO_NOT_LOCKED);
3208 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3209 /* we are beyond the tsn in the sack */
3212 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3213 /* either a RESEND, ACKED, or MARKED */
3215 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3216 /* Continue striking FWD-TSN chunks */
3217 tp1->rec.data.fwd_tsn_cnt++;
3222 * CMT : SFR algo (covers part of DAC and HTNA as well)
3224 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3226 * No new acks were received for data sent to this
3227 * dest. Therefore, according to the SFR algo for
3228 * CMT, no data sent to this dest can be marked for
3229 * FR using this SACK.
3232 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3233 tp1->whoTo->this_sack_highest_newack)) {
3235 * CMT: New acks were received for data sent to
3236 * this dest. But no new acks were seen for data
3237 * sent after tp1. Therefore, according to the SFR
3238 * algo for CMT, tp1 cannot be marked for FR using
3239 * this SACK. This step covers part of the DAC algo
3240 * and the HTNA algo as well.
3245 * Here we check to see if we have already done a FR
3246 * and if so we see if the biggest TSN we saw in the sack is
3247 * smaller than the recovery point. If so we don't strike
3248 * the tsn... otherwise we CAN strike the TSN.
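 *
 * Hypothetical example: if a fast retransmit was done while sending_seq
 * was 500, a later SACK whose biggest newly acked TSN is only 480 leaves
 * the missing-report counts alone, while one acking 520 or higher lets
 * the chunk be struck (and eventually fast-retransmitted) again.
 *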
3251 * @@@ JRI: Check for CMT if (accum_moved &&
3252 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3255 if (accum_moved && asoc->fast_retran_loss_recovery) {
3257 * Strike the TSN if in fast-recovery and the cum-ack moved.
3260 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3261 sctp_log_fr(biggest_tsn_newly_acked,
3262 tp1->rec.data.TSN_seq,
3264 SCTP_FR_LOG_STRIKE_CHUNK);
3266 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3269 if ((asoc->sctp_cmt_on_off > 0) &&
3270 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3272 * CMT DAC algorithm: If SACK flag is set to
3273 * 0, then lowest_newack test will not pass
3274 * because it would have been set to the
3275 * cumack earlier. If it is not already to be
3276 * rtx'd, if this is not a mixed sack, and if tp1 is
3277 * not between two sacked TSNs, then mark it by
3278 * one more. NOTE that we are marking by one
3279 * additional time since the SACK DAC flag
3280 * indicates that two packets have been
3281 * received after this missing TSN.
3283 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3284 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3286 sctp_log_fr(16 + num_dests_sacked,
3287 tp1->rec.data.TSN_seq,
3289 SCTP_FR_LOG_STRIKE_CHUNK);
3294 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3295 (asoc->sctp_cmt_on_off == 0)) {
3297 * For those that have done a FR we must take
3298 * special consideration if we strike; i.e., the
3299 * biggest_newly_acked must be higher than the
3300 * sending_seq at the time we did the FR.
3303 #ifdef SCTP_FR_TO_ALTERNATE
3305 * If FR's go to new networks, then we must only do
3306 * this for singly homed asoc's. However if the FR's
3307 * go to the same network (Armando's work) then it's
3308 * ok to FR multiple times.
3316 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3317 tp1->rec.data.fast_retran_tsn)) {
3319 * Strike the TSN, since this ack is
3320 * beyond where things were when we did a FR.
3323 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3324 sctp_log_fr(biggest_tsn_newly_acked,
3325 tp1->rec.data.TSN_seq,
3327 SCTP_FR_LOG_STRIKE_CHUNK);
3329 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3333 if ((asoc->sctp_cmt_on_off > 0) &&
3334 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3336 * CMT DAC algorithm: If
3337 * SACK flag is set to 0,
3338 * then lowest_newack test
3339 * will not pass because it
3340 * would have been set to
3341 * the cumack earlier. If it is
3342 * not already to be rtx'd,
3343 * if this is not a mixed sack and
3344 * if tp1 is not between two
3345 * sacked TSNs, then mark it by
3346 * one more. NOTE that we
3347 * are marking by one
3348 * additional time since the
3349 * SACK DAC flag indicates
3350 * that two packets have
3351 * been received after this
3354 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3355 (num_dests_sacked == 1) &&
3356 SCTP_TSN_GT(this_sack_lowest_newack,
3357 tp1->rec.data.TSN_seq)) {
3358 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3359 sctp_log_fr(32 + num_dests_sacked,
3360 tp1->rec.data.TSN_seq,
3362 SCTP_FR_LOG_STRIKE_CHUNK);
3364 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3372 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3375 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3376 biggest_tsn_newly_acked)) {
3378 * We don't strike these: this is the HTNA
3379 * algorithm, i.e., we don't strike if our TSN is
3380 * larger than the Highest TSN Newly Acked.
3384 /* Strike the TSN */
3385 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3386 sctp_log_fr(biggest_tsn_newly_acked,
3387 tp1->rec.data.TSN_seq,
3389 SCTP_FR_LOG_STRIKE_CHUNK);
3391 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3394 if ((asoc->sctp_cmt_on_off > 0) &&
3395 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3397 * CMT DAC algorithm: If SACK flag is set to
3398 * 0, then lowest_newack test will not pass
3399 * because it would have been set to the
3400 * cumack earlier. If it is not already to be
3401 * rtx'd, if this is not a mixed sack, and if tp1 is
3402 * not between two sacked TSNs, then mark it by
3403 * one more. NOTE that we are marking by one
3404 * additional time since the SACK DAC flag
3405 * indicates that two packets have been
3406 * received after this missing TSN.
3408 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3409 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3410 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3411 sctp_log_fr(48 + num_dests_sacked,
3412 tp1->rec.data.TSN_seq,
3414 SCTP_FR_LOG_STRIKE_CHUNK);
3420 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3421 struct sctp_nets *alt;
3423 /* fix counts and things */
3424 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3425 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3426 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3428 (uint32_t) (uintptr_t) tp1->whoTo,
3429 tp1->rec.data.TSN_seq);
3432 tp1->whoTo->net_ack++;
3433 sctp_flight_size_decrease(tp1);
3434 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3435 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3440 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3441 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3443 /* add back to the rwnd */
3444 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3446 /* remove from the total flight */
3447 sctp_total_flight_decrease(stcb, tp1);
3449 if ((stcb->asoc.prsctp_supported) &&
3450 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3451 /* Has it been retransmitted tv_sec times? -
3452 * we store the retran count there. */
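/*
 * Illustrative example: with the RTX PR-SCTP policy and a limit of, say,
 * 3 retransmissions stored in timetodrop.tv_sec, a chunk whose snd_count
 * reaches 4 is released here and later covered by a FORWARD-TSN.
 */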
3453 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3454 /* Yes, so drop it */
3455 if (tp1->data != NULL) {
3456 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3457 SCTP_SO_NOT_LOCKED);
3459 /* Make sure to flag we had a FR */
3460 tp1->whoTo->net_ack++;
3464 /* SCTP_PRINTF("OK, we are now ready to FR this
3466 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3467 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3471 /* This is a subsequent FR */
3472 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3474 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3475 if (asoc->sctp_cmt_on_off > 0) {
3477 * CMT: Using RTX_SSTHRESH policy for CMT.
3478 * If CMT is being used, then pick dest with
3479 * largest ssthresh for any retransmission.
3481 tp1->no_fr_allowed = 1;
3483 /* sa_ignore NO_NULL_CHK */
3484 if (asoc->sctp_cmt_pf > 0) {
3485 /* JRS 5/18/07 - If CMT PF is on,
3486 * use the PF version of sctp_find_alternate_net()
3488 alt = sctp_find_alternate_net(stcb, alt, 2);
3490 /* JRS 5/18/07 - If only CMT is on,
3491 * use the CMT version of sctp_find_alternate_net()
3493 /* sa_ignore NO_NULL_CHK */
3494 alt = sctp_find_alternate_net(stcb, alt, 1);
3500 * CUCv2: If a different dest is picked for
3501 * the retransmission, then new
3502 * (rtx-)pseudo_cumack needs to be tracked
3503 * for orig dest. Let CUCv2 track new (rtx-)
3504 * pseudo-cumack always.
3507 tp1->whoTo->find_pseudo_cumack = 1;
3508 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3510 } else {/* CMT is OFF */
3512 #ifdef SCTP_FR_TO_ALTERNATE
3513 /* Can we find an alternate? */
3514 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3517 * default behavior is to NOT retransmit
3518 * FR's to an alternate. Armando Caro's
3519 * paper details why.
3525 tp1->rec.data.doing_fast_retransmit = 1;
3527 /* mark the sending seq for possible subsequent FR's */
3529 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3530 * (uint32_t)tpi->rec.data.TSN_seq);
3532 if (TAILQ_EMPTY(&asoc->send_queue)) {
3534 * If the send queue is empty then sending_seq is
3535 * the next sequence number that will be
3536 * assigned so we subtract one from this to
3537 * get the one we last sent.
3539 tp1->rec.data.fast_retran_tsn = sending_seq;
3542 * If there are chunks on the send queue
3543 * (unsent data that has made it from the
3544 * stream queues but not out the door, we
3545 * take the first one (which will have the
3546 * lowest TSN) and subtract one to get the one we last sent.
3549 struct sctp_tmit_chunk *ttt;
3551 ttt = TAILQ_FIRST(&asoc->send_queue);
3552 tp1->rec.data.fast_retran_tsn =
3553 ttt->rec.data.TSN_seq;
3558 * this guy had a RTO calculation pending on it; cancel it.
3561 if ((tp1->whoTo != NULL) &&
3562 (tp1->whoTo->rto_needed == 0)) {
3563 tp1->whoTo->rto_needed = 1;
3567 if (alt != tp1->whoTo) {
3568 /* yes, there is an alternate. */
3569 sctp_free_remote_addr(tp1->whoTo);
3570 /* sa_ignore FREED_MEMORY */
3572 atomic_add_int(&alt->ref_count, 1);
3578 struct sctp_tmit_chunk *
3579 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3580 struct sctp_association *asoc)
3582 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3586 if (asoc->prsctp_supported == 0) {
3589 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3590 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3591 tp1->sent != SCTP_DATAGRAM_RESEND &&
3592 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3593 /* no chance to advance, out of here */
3596 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3597 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3598 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3599 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3600 asoc->advanced_peer_ack_point,
3601 tp1->rec.data.TSN_seq, 0, 0);
3604 if (!PR_SCTP_ENABLED(tp1->flags)) {
3606 * We can't fwd-tsn past any that are reliable aka
3607 * retransmitted until the asoc fails.
3612 (void)SCTP_GETTIME_TIMEVAL(&now);
3616 * now we got a chunk which is marked for another
3617 * retransmission to a PR-stream but has run out its chances
3618 * already maybe OR has been marked to skip now. Can we skip
3619 * it if it's a resend?
3621 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3622 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3624 * Now is this one marked for resend and its time is now expired?
3627 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3628 /* Yes so drop it */
3630 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3631 1, SCTP_SO_NOT_LOCKED);
3635 * No, we are done when we hit one marked for resend
3636 * whose time has not expired.
3642 * Ok now if this chunk is marked to drop it we can clean up
3643 * the chunk, advance our peer ack point and we can check the next chunk.
3646 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3647 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3648 /* advance PeerAckPoint goes forward */
3649 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3650 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
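/*
 * Hypothetical example: with cumulative ack 100 and abandoned (skipped
 * or NR-acked) chunks 101..103 at the head of the sent queue, the
 * advanced peer ack point moves up to 103, which is what a subsequent
 * FORWARD-TSN will ask the peer to treat as cum-acked.
 */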
3652 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3653 /* No update but we do save the chk */
3658 * If it is still in RESEND we can advance no further.
3668 sctp_fs_audit(struct sctp_association *asoc)
3670 struct sctp_tmit_chunk *chk;
3671 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3674 int entry_flight, entry_cnt;
3679 entry_flight = asoc->total_flight;
3680 entry_cnt = asoc->total_flight_count;
3682 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3685 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3686 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3687 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3688 chk->rec.data.TSN_seq,
3692 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3694 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3696 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3703 if ((inflight > 0) || (inbetween > 0)) {
3705 panic("Flight size-express incorrect? \n");
3707 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3708 entry_flight, entry_cnt);
3710 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3711 inflight, inbetween, resend, above, acked);
3720 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3721 struct sctp_association *asoc,
3722 struct sctp_tmit_chunk *tp1)
3724 tp1->window_probe = 0;
3725 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3726 /* TSNs skipped; we do NOT move back. */
3727 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3728 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3730 (uint32_t) (uintptr_t) tp1->whoTo,
3731 tp1->rec.data.TSN_seq);
3734 /* First setup this by shrinking flight */
3735 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3736 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3739 sctp_flight_size_decrease(tp1);
3740 sctp_total_flight_decrease(stcb, tp1);
3741 /* Now mark for resend */
3742 tp1->sent = SCTP_DATAGRAM_RESEND;
3743 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3745 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3746 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3747 tp1->whoTo->flight_size,
3749 (uint32_t) (uintptr_t) tp1->whoTo,
3750 tp1->rec.data.TSN_seq);
3755 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3756 uint32_t rwnd, int *abort_now, int ecne_seen)
3758 struct sctp_nets *net;
3759 struct sctp_association *asoc;
3760 struct sctp_tmit_chunk *tp1, *tp2;
3762 int win_probe_recovery = 0;
3763 int win_probe_recovered = 0;
3764 int j, done_once = 0;
3768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3769 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3770 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3772 SCTP_TCB_LOCK_ASSERT(stcb);
3773 #ifdef SCTP_ASOCLOG_OF_TSNS
3774 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3775 stcb->asoc.cumack_log_at++;
3776 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3777 stcb->asoc.cumack_log_at = 0;
3781 old_rwnd = asoc->peers_rwnd;
3782 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3785 } else if (asoc->last_acked_seq == cumack) {
3786 /* Window update sack */
3787 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3788 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
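/*
 * Worked example (hypothetical numbers): an advertised rwnd of 65536
 * with 10000 bytes in flight across 5 chunks and a per-chunk overhead of
 * 256 bytes yields peers_rwnd = 65536 - (10000 + 5 * 256) = 54256; if
 * the result falls below the SWS threshold it is clamped to 0 below.
 */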
3789 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3790 /* SWS sender side engages */
3791 asoc->peers_rwnd = 0;
3793 if (asoc->peers_rwnd > old_rwnd) {
3798 /* First setup for CC stuff */
3799 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3800 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3801 /* Drag along the window_tsn for cwr's */
3802 net->cwr_window_tsn = cumack;
3804 net->prev_cwnd = net->cwnd;
3809 * CMT: Reset CUC and Fast recovery algo variables before
3812 net->new_pseudo_cumack = 0;
3813 net->will_exit_fast_recovery = 0;
3814 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3815 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3818 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3819 tp1 = TAILQ_LAST(&asoc->sent_queue,
3820 sctpchunk_listhead);
3821 send_s = tp1->rec.data.TSN_seq + 1;
3823 send_s = asoc->sending_seq;
3825 if (SCTP_TSN_GE(cumack, send_s)) {
3826 struct mbuf *op_err;
3827 char msg[SCTP_DIAG_INFO_LEN];
3831 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3833 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3834 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3835 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3838 asoc->this_sack_highest_gap = cumack;
3839 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3840 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3841 stcb->asoc.overall_error_count,
3843 SCTP_FROM_SCTP_INDATA,
3846 stcb->asoc.overall_error_count = 0;
3847 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3848 /* process the new consecutive TSN first */
3849 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3850 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3851 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3852 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3854 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3856 * If it is less than ACKED, it is
3857 * now no longer in flight. Higher
3858 * values may occur during marking
3860 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3861 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3862 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3863 tp1->whoTo->flight_size,
3865 (uint32_t) (uintptr_t) tp1->whoTo,
3866 tp1->rec.data.TSN_seq);
3868 sctp_flight_size_decrease(tp1);
3869 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3870 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3873 /* sa_ignore NO_NULL_CHK */
3874 sctp_total_flight_decrease(stcb, tp1);
3876 tp1->whoTo->net_ack += tp1->send_size;
3877 if (tp1->snd_count < 2) {
3879 * True non-retransmitted
3882 tp1->whoTo->net_ack2 +=
3885 /* update RTO too? */
3893 sctp_calculate_rto(stcb,
3895 &tp1->sent_rcv_time,
3896 sctp_align_safe_nocopy,
3897 SCTP_RTT_FROM_DATA);
3900 if (tp1->whoTo->rto_needed == 0) {
3901 tp1->whoTo->rto_needed = 1;
3907 * CMT: CUCv2 algorithm. From the
3908 * cumack'd TSNs, for each TSN being
3909 * acked for the first time, set the
3910 * following variables for the
3911 * corresp destination.
3912 * new_pseudo_cumack will trigger a
3914 * find_(rtx_)pseudo_cumack will
3915 * trigger search for the next
3916 * expected (rtx-)pseudo-cumack.
3918 tp1->whoTo->new_pseudo_cumack = 1;
3919 tp1->whoTo->find_pseudo_cumack = 1;
3920 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3922 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3923 /* sa_ignore NO_NULL_CHK */
3924 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3927 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3928 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3930 if (tp1->rec.data.chunk_was_revoked) {
3931 /* deflate the cwnd */
3932 tp1->whoTo->cwnd -= tp1->book_size;
3933 tp1->rec.data.chunk_was_revoked = 0;
3935 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3936 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3937 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3940 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3944 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3945 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3946 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3947 asoc->trigger_reset = 1;
3949 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3951 /* sa_ignore NO_NULL_CHK */
3952 sctp_free_bufspace(stcb, asoc, tp1, 1);
3953 sctp_m_freem(tp1->data);
3956 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3957 sctp_log_sack(asoc->last_acked_seq,
3959 tp1->rec.data.TSN_seq,
3962 SCTP_LOG_FREE_SENT);
3964 asoc->sent_queue_cnt--;
3965 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3972 /* sa_ignore NO_NULL_CHK */
3973 if (stcb->sctp_socket) {
3974 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3978 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3980 /* sa_ignore NO_NULL_CHK */
3981 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3983 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3984 so = SCTP_INP_SO(stcb->sctp_ep);
3985 atomic_add_int(&stcb->asoc.refcnt, 1);
3986 SCTP_TCB_UNLOCK(stcb);
3987 SCTP_SOCKET_LOCK(so, 1);
3988 SCTP_TCB_LOCK(stcb);
3989 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3990 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3991 /* assoc was freed while we were unlocked */
3992 SCTP_SOCKET_UNLOCK(so, 1);
3996 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3998 SCTP_SOCKET_UNLOCK(so, 1);
4001 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4002 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4006 /* JRS - Use the congestion control given in the CC module */
4007 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4008 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4009 if (net->net_ack2 > 0) {
4011 * Karn's rule applies to clearing error
4012 * count; this is optional.
4014 net->error_count = 0;
4015 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4016 /* addr came good */
4017 net->dest_state |= SCTP_ADDR_REACHABLE;
4018 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4019 0, (void *)net, SCTP_SO_NOT_LOCKED);
4021 if (net == stcb->asoc.primary_destination) {
4022 if (stcb->asoc.alternate) {
4023 /* release the alternate,
4024 * primary is good */
4025 sctp_free_remote_addr(stcb->asoc.alternate);
4026 stcb->asoc.alternate = NULL;
4029 if (net->dest_state & SCTP_ADDR_PF) {
4030 net->dest_state &= ~SCTP_ADDR_PF;
4031 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4032 stcb->sctp_ep, stcb, net,
4033 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4034 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4035 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4036 /* Done with this net */
4039 /* restore any doubled timers */
4040 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4041 if (net->RTO < stcb->asoc.minrto) {
4042 net->RTO = stcb->asoc.minrto;
4044 if (net->RTO > stcb->asoc.maxrto) {
4045 net->RTO = stcb->asoc.maxrto;
4049 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4051 asoc->last_acked_seq = cumack;
4053 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4054 /* nothing left in-flight */
4055 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4056 net->flight_size = 0;
4057 net->partial_bytes_acked = 0;
4059 asoc->total_flight = 0;
4060 asoc->total_flight_count = 0;
4063 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4064 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4065 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4066 /* SWS sender side engages */
4067 asoc->peers_rwnd = 0;
4069 if (asoc->peers_rwnd > old_rwnd) {
4070 win_probe_recovery = 1;
4072 /* Now assure a timer where data is queued at */
4075 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4078 if (win_probe_recovery && (net->window_probe)) {
4079 win_probe_recovered = 1;
4081 * Find first chunk that was used with window probe
4082 * and clear the sent flag.
4084 /* sa_ignore FREED_MEMORY */
4085 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4086 if (tp1->window_probe) {
4087 /* move back to data send queue */
4088 sctp_window_probe_recovery(stcb, asoc, tp1);
4093 if (net->RTO == 0) {
4094 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4096 to_ticks = MSEC_TO_TICKS(net->RTO);
4098 if (net->flight_size) {
4100 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4101 sctp_timeout_handler, &net->rxt_timer);
4102 if (net->window_probe) {
4103 net->window_probe = 0;
4106 if (net->window_probe) {
4107 /* In window probes we must assure a timer
4108 * is still running there */
4109 net->window_probe = 0;
4110 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4111 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4112 sctp_timeout_handler, &net->rxt_timer);
4114 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4115 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4117 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4122 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4123 (asoc->sent_queue_retran_cnt == 0) &&
4124 (win_probe_recovered == 0) &&
4127 * huh, this should not happen unless all packets are
4128 * PR-SCTP and marked to skip of course.
4130 if (sctp_fs_audit(asoc)) {
4131 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4132 net->flight_size = 0;
4134 asoc->total_flight = 0;
4135 asoc->total_flight_count = 0;
4136 asoc->sent_queue_retran_cnt = 0;
4137 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4138 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4139 sctp_flight_size_increase(tp1);
4140 sctp_total_flight_increase(stcb, tp1);
4141 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4142 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
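/*
 * Illustrative sketch (hypothetical, self-contained types): what the
 * flight-size audit above amounts to. When the aggregate counters
 * disagree with the per-chunk states, the flight size and retransmission
 * count are rebuilt by re-walking the unacked chunks.
 */
#if 0
struct example_chunk {
	int sent;		/* below RESEND: in flight; RESEND: queued for retran */
	uint32_t send_size;
};
#define EXAMPLE_DATAGRAM_RESEND 4	/* placeholder value */

static void
example_rebuild_flight(struct example_chunk *c, int n,
    uint32_t *total_flight, uint32_t *retran_cnt)
{
	int i;

	*total_flight = 0;
	*retran_cnt = 0;
	for (i = 0; i < n; i++) {
		if (c[i].sent < EXAMPLE_DATAGRAM_RESEND)
			*total_flight += c[i].send_size;
		else if (c[i].sent == EXAMPLE_DATAGRAM_RESEND)
			(*retran_cnt)++;
	}
}
#endif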
4149 /**********************************/
4150 /* Now what about shutdown issues */
4151 /**********************************/
4152 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4153 /* nothing left on sendqueue.. consider done */
4155 if ((asoc->stream_queue_cnt == 1) &&
4156 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4157 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4158 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4159 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4161 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4162 (asoc->stream_queue_cnt == 0)) {
4163 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4164 /* Need to abort here */
4165 struct mbuf *op_err;
4170 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4171 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4172 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4175 struct sctp_nets *netp;
4177 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4178 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4179 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4181 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4182 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4183 sctp_stop_timers_for_shutdown(stcb);
4184 if (asoc->alternate) {
4185 netp = asoc->alternate;
4187 netp = asoc->primary_destination;
4189 sctp_send_shutdown(stcb, netp);
4190 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4191 stcb->sctp_ep, stcb, netp);
4192 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4193 stcb->sctp_ep, stcb, netp);
4195 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4196 (asoc->stream_queue_cnt == 0)) {
4197 struct sctp_nets *netp;
4199 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4202 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4203 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4204 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4205 sctp_stop_timers_for_shutdown(stcb);
4206 if (asoc->alternate) {
4207 netp = asoc->alternate;
4209 netp = asoc->primary_destination;
4211 sctp_send_shutdown_ack(stcb, netp);
4212 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4213 stcb->sctp_ep, stcb, netp);
4216 /*********************************************/
4217 /* Here we perform PR-SCTP procedures */
4219 /*********************************************/
4220 /* C1. update advancedPeerAckPoint */
4221 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4222 asoc->advanced_peer_ack_point = cumack;
4224 /* PR-Sctp issues need to be addressed too */
4225 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4226 struct sctp_tmit_chunk *lchk;
4227 uint32_t old_adv_peer_ack_point;
4229 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4230 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4231 /* C3. See if we need to send a Fwd-TSN */
4232 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4234 * ISSUE with ECN, see FWD-TSN processing.
4236 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4237 send_forward_tsn(stcb, asoc);
4239 /* try to FR fwd-tsn's that get lost too */
4240 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4241 send_forward_tsn(stcb, asoc);
4246 /* Assure a timer is up */
4247 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4248 stcb->sctp_ep, stcb, lchk->whoTo);
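/*
 * Illustrative sketch of the FORWARD-TSN decision above (hypothetical
 * helper; the real code compares TSNs with the wrap-safe serial-number
 * macros): a FWD-TSN is only useful once the advanced peer ack point lies
 * beyond the cumulative ack, and is (re)sent when that point actually
 * moved, or as a recovery measure when earlier FWD-TSNs appear lost.
 */
#if 0
static int
example_should_send_fwd_tsn(uint32_t adv_peer_ack, uint32_t old_adv_peer_ack,
    uint32_t cum_ack, uint32_t fwd_tsn_cnt)
{
	if (adv_peer_ack == cum_ack)
		return (0);	/* nothing was skipped past the cum-ack */
	if (adv_peer_ack != old_adv_peer_ack)
		return (1);	/* the ack point advanced; tell the peer */
	return (fwd_tsn_cnt >= 3);	/* earlier FWD-TSNs likely lost */
}
#endif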
4251 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4252 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4254 stcb->asoc.peers_rwnd,
4255 stcb->asoc.total_flight,
4256 stcb->asoc.total_output_queue_size);
4261 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4262 struct sctp_tcb *stcb,
4263 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4264 int *abort_now, uint8_t flags,
4265 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4267 struct sctp_association *asoc;
4268 struct sctp_tmit_chunk *tp1, *tp2;
4269 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4270 uint16_t wake_him = 0;
4271 uint32_t send_s = 0;
4273 int accum_moved = 0;
4274 int will_exit_fast_recovery = 0;
4275 uint32_t a_rwnd, old_rwnd;
4276 int win_probe_recovery = 0;
4277 int win_probe_recovered = 0;
4278 struct sctp_nets *net = NULL;
4281 uint8_t reneged_all = 0;
4282 uint8_t cmt_dac_flag;
4285 * we take any chance we can to service our queues since we cannot
4286 * get awoken when the socket is read from :<
4289 * Now perform the actual SACK handling: 1) Verify that it is not an
4290 * old sack, if so discard. 2) If there is nothing left in the send
4291 * queue (cum-ack is equal to last acked) then you have a duplicate
4292 * too, update any rwnd change and verify no timers are running.
4293 * then return. 3) Process any new consecutive data i.e. cum-ack
4294 * moved process these first and note that it moved. 4) Process any
4295 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4296 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4297 * sync up flightsizes and things, stop all timers and also check
4298 * for shutdown_pending state. If so then go ahead and send off the
4299 * shutdown. If in shutdown recv, send off the shutdown-ack and
4300 * start that timer, Ret. 9) Strike any non-acked things and do FR
4301 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4302 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4303 * if in shutdown_recv state.
4305 SCTP_TCB_LOCK_ASSERT(stcb);
4307 this_sack_lowest_newack = 0;
4308 SCTP_STAT_INCR(sctps_slowpath_sack);
4310 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4311 #ifdef SCTP_ASOCLOG_OF_TSNS
4312 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4313 stcb->asoc.cumack_log_at++;
4314 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4315 stcb->asoc.cumack_log_at = 0;
4320 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4321 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4322 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4324 old_rwnd = stcb->asoc.peers_rwnd;
4325 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4326 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4327 stcb->asoc.overall_error_count,
4329 SCTP_FROM_SCTP_INDATA,
4332 stcb->asoc.overall_error_count = 0;
4334 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4335 sctp_log_sack(asoc->last_acked_seq,
4342 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4344 uint32_t *dupdata, dblock;
4346 for (i = 0; i < num_dup; i++) {
4347 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4348 sizeof(uint32_t), (uint8_t *) & dblock);
4349 if (dupdata == NULL) {
4352 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4356 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4357 tp1 = TAILQ_LAST(&asoc->sent_queue,
4358 sctpchunk_listhead);
4359 send_s = tp1->rec.data.TSN_seq + 1;
4362 send_s = asoc->sending_seq;
4364 if (SCTP_TSN_GE(cum_ack, send_s)) {
4365 struct mbuf *op_err;
4366 char msg[SCTP_DIAG_INFO_LEN];
4369 * no way, we have not even sent this TSN out yet. Peer is
4370 * hopelessly messed up with us.
4372 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4375 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4376 tp1->rec.data.TSN_seq, (void *)tp1);
4381 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4383 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4384 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4385 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
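/*
 * Illustrative sketch (standalone helper, not the kernel macro itself):
 * TSN comparisons such as the SCTP_TSN_GE() check above are done in
 * serial-number style so that wrap-around of the 32-bit TSN space is
 * handled; "a is newer than b" holds across the wrap as long as the two
 * values are less than 2^31 apart.
 */
#if 0
static int
example_tsn_gt(uint32_t a, uint32_t b)
{
	return (((a < b) && ((uint32_t)(b - a) > (1U << 31))) ||
	    ((a > b) && ((uint32_t)(a - b) < (1U << 31))));
}
#endif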
4388 /**********************/
4389 /* 1) check the range */
4390 /**********************/
4391 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4392 /* acking something behind */
4395 /* update the Rwnd of the peer */
4396 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4397 TAILQ_EMPTY(&asoc->send_queue) &&
4398 (asoc->stream_queue_cnt == 0)) {
4399 /* nothing left on send/sent and strmq */
4400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4401 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4402 asoc->peers_rwnd, 0, 0, a_rwnd);
4404 asoc->peers_rwnd = a_rwnd;
4405 if (asoc->sent_queue_retran_cnt) {
4406 asoc->sent_queue_retran_cnt = 0;
4408 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4409 /* SWS sender side engages */
4410 asoc->peers_rwnd = 0;
4412 /* stop any timers */
4413 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4414 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4415 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4416 net->partial_bytes_acked = 0;
4417 net->flight_size = 0;
4419 asoc->total_flight = 0;
4420 asoc->total_flight_count = 0;
4424 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4425 * things. The total byte count acked is tracked in netAckSz AND
4426 * netAck2 is used to track the total bytes acked that are
4427 * unambiguous and were never retransmitted. We track these on a per
4428 * destination address basis.
4430 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4431 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4432 /* Drag along the window_tsn for cwr's */
4433 net->cwr_window_tsn = cum_ack;
4435 net->prev_cwnd = net->cwnd;
4440 * CMT: Reset CUC and Fast recovery algo variables before
4441 * SACK processing
4443 net->new_pseudo_cumack = 0;
4444 net->will_exit_fast_recovery = 0;
4445 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4446 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
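/*
 * Illustrative sketch (hypothetical struct and helper): the two
 * per-destination counters described above. net_ack accumulates every
 * newly acked byte on the path; net_ack2 only bytes from chunks that were
 * never retransmitted (snd_count < 2), so it can safely drive RTT and
 * error-count decisions per Karn's rule.
 */
#if 0
struct example_path_acks {
	uint32_t net_ack;	/* all newly acked bytes */
	uint32_t net_ack2;	/* unambiguously acked bytes only */
};

static void
example_account_newly_acked(struct example_path_acks *p,
    uint32_t send_size, int snd_count)
{
	p->net_ack += send_size;
	if (snd_count < 2)
		p->net_ack2 += send_size;
}
#endif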
4449 /* process the new consecutive TSN first */
4450 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4451 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4452 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4454 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4456 * If it is less than ACKED, it is
4457 * now no longer in flight. Higher
4458 * values may occur during marking
4460 if ((tp1->whoTo->dest_state &
4461 SCTP_ADDR_UNCONFIRMED) &&
4462 (tp1->snd_count < 2)) {
4464 * If there was no retran
4465 * and the address is
4466 * un-confirmed and we sent
4467 * there and are now
4468 * sacked.. it's confirmed,
4469 * mark it so.
4471 tp1->whoTo->dest_state &=
4472 ~SCTP_ADDR_UNCONFIRMED;
4474 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4475 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4476 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4477 tp1->whoTo->flight_size,
4479 (uint32_t) (uintptr_t) tp1->whoTo,
4480 tp1->rec.data.TSN_seq);
4482 sctp_flight_size_decrease(tp1);
4483 sctp_total_flight_decrease(stcb, tp1);
4484 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4485 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4489 tp1->whoTo->net_ack += tp1->send_size;
4491 /* CMT SFR and DAC algos */
4492 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4493 tp1->whoTo->saw_newack = 1;
4495 if (tp1->snd_count < 2) {
4497 * True non-retransmitted
4498 * chunk
4500 tp1->whoTo->net_ack2 +=
4503 /* update RTO too? */
4507 sctp_calculate_rto(stcb,
4509 &tp1->sent_rcv_time,
4510 sctp_align_safe_nocopy,
4511 SCTP_RTT_FROM_DATA);
4514 if (tp1->whoTo->rto_needed == 0) {
4515 tp1->whoTo->rto_needed = 1;
4521 * CMT: CUCv2 algorithm. From the
4522 * cumack'd TSNs, for each TSN being
4523 * acked for the first time, set the
4524 * following variables for the
4525 * corresp destination.
4526 * new_pseudo_cumack will trigger a
4527 * cwnd update.
4528 * find_(rtx_)pseudo_cumack will
4529 * trigger search for the next
4530 * expected (rtx-)pseudo-cumack.
4532 tp1->whoTo->new_pseudo_cumack = 1;
4533 tp1->whoTo->find_pseudo_cumack = 1;
4534 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4537 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4538 sctp_log_sack(asoc->last_acked_seq,
4540 tp1->rec.data.TSN_seq,
4543 SCTP_LOG_TSN_ACKED);
4545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4546 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4549 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4550 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4551 #ifdef SCTP_AUDITING_ENABLED
4552 sctp_audit_log(0xB3,
4553 (asoc->sent_queue_retran_cnt & 0x000000ff));
4556 if (tp1->rec.data.chunk_was_revoked) {
4557 /* deflate the cwnd */
4558 tp1->whoTo->cwnd -= tp1->book_size;
4559 tp1->rec.data.chunk_was_revoked = 0;
4561 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4562 tp1->sent = SCTP_DATAGRAM_ACKED;
4569 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4570 /* always set this up to cum-ack */
4571 asoc->this_sack_highest_gap = last_tsn;
4573 if ((num_seg > 0) || (num_nr_seg > 0)) {
4576 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4577 * to be greater than the cumack. Also reset saw_newack to 0
4578 * for all dests.
4580 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4581 net->saw_newack = 0;
4582 net->this_sack_highest_newack = last_tsn;
4586 * thisSackHighestGap will increase while handling NEW
4587 * segments. this_sack_highest_newack will increase while
4588 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4589 * used for CMT DAC algo. saw_newack will also change.
4591 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4592 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4593 num_seg, num_nr_seg, &rto_ok)) {
4597 * validate the biggest_tsn_acked in the gap acks if strict
4598 * adherence is wanted.
4600 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4602 * peer is either confused or we are under attack.
4605 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4606 biggest_tsn_acked, send_s);
4610 /*******************************************/
4611 /* cancel ALL T3-send timer if accum moved */
4612 /*******************************************/
4613 if (asoc->sctp_cmt_on_off > 0) {
4614 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4615 if (net->new_pseudo_cumack)
4616 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4618 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4623 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4624 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4625 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4629 /********************************************/
4630 /* drop the acked chunks from the sentqueue */
4631 /********************************************/
4632 asoc->last_acked_seq = cum_ack;
4634 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4635 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4638 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4639 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4640 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4643 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4647 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4648 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4649 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4650 asoc->trigger_reset = 1;
4652 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4653 if (PR_SCTP_ENABLED(tp1->flags)) {
4654 if (asoc->pr_sctp_cnt != 0)
4655 asoc->pr_sctp_cnt--;
4657 asoc->sent_queue_cnt--;
4659 /* sa_ignore NO_NULL_CHK */
4660 sctp_free_bufspace(stcb, asoc, tp1, 1);
4661 sctp_m_freem(tp1->data);
4663 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4664 asoc->sent_queue_cnt_removeable--;
4667 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4668 sctp_log_sack(asoc->last_acked_seq,
4670 tp1->rec.data.TSN_seq,
4673 SCTP_LOG_FREE_SENT);
4675 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4678 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4680 panic("Warning flight size is positive and should be 0");
4682 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4683 asoc->total_flight);
4685 asoc->total_flight = 0;
4687 /* sa_ignore NO_NULL_CHK */
4688 if ((wake_him) && (stcb->sctp_socket)) {
4689 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4693 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4694 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4695 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4697 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4698 so = SCTP_INP_SO(stcb->sctp_ep);
4699 atomic_add_int(&stcb->asoc.refcnt, 1);
4700 SCTP_TCB_UNLOCK(stcb);
4701 SCTP_SOCKET_LOCK(so, 1);
4702 SCTP_TCB_LOCK(stcb);
4703 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4704 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4705 /* assoc was freed while we were unlocked */
4706 SCTP_SOCKET_UNLOCK(so, 1);
4710 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4711 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4712 SCTP_SOCKET_UNLOCK(so, 1);
4715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4716 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4720 if (asoc->fast_retran_loss_recovery && accum_moved) {
4721 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4722 /* Setup so we will exit RFC2582 fast recovery */
4723 will_exit_fast_recovery = 1;
4727 * Check for revoked fragments:
4729 * If the previous sack had no frags, then we can't have any revoked.
4730 * If the previous sack had frags, then: if we now have frags (aka
4731 * num_seg > 0), call sctp_check_for_revoked() to tell if the peer
4732 * revoked some of them; else the peer revoked all ACKED fragments,
4733 * since we had some before and now we have NONE.
4737 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4738 asoc->saw_sack_with_frags = 1;
4739 } else if (asoc->saw_sack_with_frags) {
4740 int cnt_revoked = 0;
4742 /* Peer revoked all dg's marked or acked */
4743 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4744 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4745 tp1->sent = SCTP_DATAGRAM_SENT;
4746 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4747 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4748 tp1->whoTo->flight_size,
4750 (uint32_t) (uintptr_t) tp1->whoTo,
4751 tp1->rec.data.TSN_seq);
4753 sctp_flight_size_increase(tp1);
4754 sctp_total_flight_increase(stcb, tp1);
4755 tp1->rec.data.chunk_was_revoked = 1;
4757 * To ensure that this increase in
4758 * flightsize, which is artificial, does not
4759 * throttle the sender, we also increase the
4760 * cwnd artificially.
4762 tp1->whoTo->cwnd += tp1->book_size;
4769 asoc->saw_sack_with_frags = 0;
4772 asoc->saw_sack_with_nr_frags = 1;
4774 asoc->saw_sack_with_nr_frags = 0;
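/*
 * Illustrative sketch of the revocation handling above (hypothetical
 * helpers, simplified): a revoked chunk goes back in flight and the cwnd
 * is inflated by its book_size so the artificial flight increase does not
 * throttle the sender; the inflation is removed again ("deflate the
 * cwnd") when the chunk is finally cum-acked.
 */
#if 0
static void
example_mark_revoked(uint32_t *cwnd, uint32_t *flight, uint32_t book_size)
{
	*flight += book_size;	/* counts as outstanding again */
	*cwnd += book_size;	/* temporary, artificial inflation */
}

static void
example_ack_revoked(uint32_t *cwnd, uint32_t *flight, uint32_t book_size)
{
	*flight -= book_size;
	*cwnd -= book_size;	/* undo the artificial inflation */
}
#endif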
4776 /* JRS - Use the congestion control given in the CC module */
4777 if (ecne_seen == 0) {
4778 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4779 if (net->net_ack2 > 0) {
4781 * Karn's rule applies to clearing error
4782 * count, this is optional.
4784 net->error_count = 0;
4785 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4786 /* addr came good */
4787 net->dest_state |= SCTP_ADDR_REACHABLE;
4788 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4789 0, (void *)net, SCTP_SO_NOT_LOCKED);
4791 if (net == stcb->asoc.primary_destination) {
4792 if (stcb->asoc.alternate) {
4793 /* release the alternate,
4794 * primary is good */
4795 sctp_free_remote_addr(stcb->asoc.alternate);
4796 stcb->asoc.alternate = NULL;
4799 if (net->dest_state & SCTP_ADDR_PF) {
4800 net->dest_state &= ~SCTP_ADDR_PF;
4801 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4802 stcb->sctp_ep, stcb, net,
4803 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4804 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4805 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4806 /* Done with this net */
4809 /* restore any doubled timers */
4810 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4811 if (net->RTO < stcb->asoc.minrto) {
4812 net->RTO = stcb->asoc.minrto;
4814 if (net->RTO > stcb->asoc.maxrto) {
4815 net->RTO = stcb->asoc.maxrto;
4819 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4821 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4822 /* nothing left in-flight */
4823 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4824 /* stop all timers */
4825 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4827 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4828 net->flight_size = 0;
4829 net->partial_bytes_acked = 0;
4831 asoc->total_flight = 0;
4832 asoc->total_flight_count = 0;
4834 /**********************************/
4835 /* Now what about shutdown issues */
4836 /**********************************/
4837 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4838 /* nothing left on sendqueue.. consider done */
4839 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4840 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4841 asoc->peers_rwnd, 0, 0, a_rwnd);
4843 asoc->peers_rwnd = a_rwnd;
4844 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4845 /* SWS sender side engages */
4846 asoc->peers_rwnd = 0;
4849 if ((asoc->stream_queue_cnt == 1) &&
4850 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4851 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4852 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4853 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4855 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4856 (asoc->stream_queue_cnt == 0)) {
4857 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4858 /* Need to abort here */
4859 struct mbuf *op_err;
4864 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4865 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4866 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4869 struct sctp_nets *netp;
4871 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4872 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4873 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4875 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4876 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4877 sctp_stop_timers_for_shutdown(stcb);
4878 if (asoc->alternate) {
4879 netp = asoc->alternate;
4881 netp = asoc->primary_destination;
4883 sctp_send_shutdown(stcb, netp);
4884 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4885 stcb->sctp_ep, stcb, netp);
4886 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4887 stcb->sctp_ep, stcb, netp);
4890 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4891 (asoc->stream_queue_cnt == 0)) {
4892 struct sctp_nets *netp;
4894 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4897 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4898 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4899 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4900 sctp_stop_timers_for_shutdown(stcb);
4901 if (asoc->alternate) {
4902 netp = asoc->alternate;
4904 netp = asoc->primary_destination;
4906 sctp_send_shutdown_ack(stcb, netp);
4907 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4908 stcb->sctp_ep, stcb, netp);
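/*
 * Illustrative sketch of the shutdown handling above (hypothetical enum
 * and helper, simplified): once the send and sent queues are empty, a
 * locally pending shutdown emits SHUTDOWN, a received shutdown is
 * answered with SHUTDOWN-ACK, and a partially transmitted user message
 * forces an abort with a "user initiated" cause instead.
 */
#if 0
enum example_action { EX_NONE, EX_ABORT, EX_SHUTDOWN, EX_SHUTDOWN_ACK };

static enum example_action
example_shutdown_step(int queues_empty, int shutdown_pending,
    int shutdown_received, int partial_msg_left)
{
	if (!queues_empty)
		return (EX_NONE);
	if ((shutdown_pending || shutdown_received) && partial_msg_left)
		return (EX_ABORT);
	if (shutdown_pending)
		return (EX_SHUTDOWN);
	if (shutdown_received)
		return (EX_SHUTDOWN_ACK);
	return (EX_NONE);
}
#endif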
4913 * Now here we are going to recycle net_ack for a different use...
4916 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4921 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4922 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4923 * automatically ensure that.
4925 if ((asoc->sctp_cmt_on_off > 0) &&
4926 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4927 (cmt_dac_flag == 0)) {
4928 this_sack_lowest_newack = cum_ack;
4930 if ((num_seg > 0) || (num_nr_seg > 0)) {
4931 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4932 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4934 /* JRS - Use the congestion control given in the CC module */
4935 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4937 /* Now are we exiting loss recovery ? */
4938 if (will_exit_fast_recovery) {
4939 /* Ok, we must exit fast recovery */
4940 asoc->fast_retran_loss_recovery = 0;
4942 if ((asoc->sat_t3_loss_recovery) &&
4943 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4944 /* end satellite t3 loss recovery */
4945 asoc->sat_t3_loss_recovery = 0;
4950 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4951 if (net->will_exit_fast_recovery) {
4952 /* Ok, we must exit fast recovery */
4953 net->fast_retran_loss_recovery = 0;
4957 /* Adjust and set the new rwnd value */
4958 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4959 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4960 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4962 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4963 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4964 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4965 /* SWS sender side engages */
4966 asoc->peers_rwnd = 0;
4968 if (asoc->peers_rwnd > old_rwnd) {
4969 win_probe_recovery = 1;
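/*
 * Illustrative sketch (hypothetical helper): the effective peer window
 * computed above. The advertised a_rwnd is reduced by the bytes still in
 * flight plus a configurable per-chunk overhead, never allowed to go
 * negative, and treated as zero when it drops below the sender-side SWS
 * threshold so tiny windows are not chased.
 */
#if 0
static uint32_t
example_peer_rwnd(uint32_t a_rwnd, uint32_t total_flight,
    uint32_t flight_count, uint32_t chunk_overhead, uint32_t sws_sender)
{
	uint32_t used, rwnd;

	used = total_flight + flight_count * chunk_overhead;
	rwnd = (a_rwnd > used) ? (a_rwnd - used) : 0;
	if (rwnd < sws_sender)
		rwnd = 0;	/* silly window syndrome avoidance */
	return (rwnd);
}
#endif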
4972 * Now we must setup so we have a timer up for anyone with
4973 * outstanding data.
4978 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4979 if (win_probe_recovery && (net->window_probe)) {
4980 win_probe_recovered = 1;
4982 * Find first chunk that was used with
4983 * window probe and clear the event. Put
4984 * it back into the send queue as if it has
4985 * not been sent.
4987 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4988 if (tp1->window_probe) {
4989 sctp_window_probe_recovery(stcb, asoc, tp1);
4994 if (net->flight_size) {
4996 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4997 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4998 stcb->sctp_ep, stcb, net);
5000 if (net->window_probe) {
5001 net->window_probe = 0;
5004 if (net->window_probe) {
5005 /* In window probes we must assure a timer
5006 * is still running there */
5007 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5008 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5009 stcb->sctp_ep, stcb, net);
5012 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5013 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5015 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5020 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5021 (asoc->sent_queue_retran_cnt == 0) &&
5022 (win_probe_recovered == 0) &&
5025 * huh, this should not happen unless all packets are
5026 * PR-SCTP and marked to skip of course.
5028 if (sctp_fs_audit(asoc)) {
5029 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5030 net->flight_size = 0;
5032 asoc->total_flight = 0;
5033 asoc->total_flight_count = 0;
5034 asoc->sent_queue_retran_cnt = 0;
5035 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5036 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5037 sctp_flight_size_increase(tp1);
5038 sctp_total_flight_increase(stcb, tp1);
5039 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5040 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5047 /*********************************************/
5048 /* Here we perform PR-SCTP procedures */
5050 /*********************************************/
5051 /* C1. update advancedPeerAckPoint */
5052 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5053 asoc->advanced_peer_ack_point = cum_ack;
5055 /* C2. try to further move advancedPeerAckPoint ahead */
5056 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5057 struct sctp_tmit_chunk *lchk;
5058 uint32_t old_adv_peer_ack_point;
5060 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5061 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5062 /* C3. See if we need to send a Fwd-TSN */
5063 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5065 * ISSUE with ECN, see FWD-TSN processing.
5067 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5068 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5069 0xee, cum_ack, asoc->advanced_peer_ack_point,
5070 old_adv_peer_ack_point);
5072 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5073 send_forward_tsn(stcb, asoc);
5075 /* try to FR fwd-tsn's that get lost too */
5076 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5077 send_forward_tsn(stcb, asoc);
5082 /* Assure a timer is up */
5083 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5084 stcb->sctp_ep, stcb, lchk->whoTo);
5087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5088 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5090 stcb->asoc.peers_rwnd,
5091 stcb->asoc.total_flight,
5092 stcb->asoc.total_output_queue_size);
5097 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5100 uint32_t cum_ack, a_rwnd;
5102 cum_ack = ntohl(cp->cumulative_tsn_ack);
5103 /* Arrange so a_rwnd does NOT change */
5104 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5106 /* Now call the express sack handling */
5107 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5111 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5112 struct sctp_stream_in *strmin)
5114 struct sctp_queued_to_read *ctl, *nctl;
5115 struct sctp_association *asoc;
5117 int need_reasm_check = 0, old;
5120 tt = strmin->last_sequence_delivered;
5121 if (asoc->idata_supported) {
5127 * First deliver anything prior to and including the stream no that
5130 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5131 if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5132 /* this is deliverable now */
5133 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5134 if (ctl->on_strm_q) {
5135 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5136 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5137 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5138 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5141 panic("strmin: %p ctl: %p unknown %d",
5142 strmin, ctl, ctl->on_strm_q);
5147 /* subtract pending on streams */
5148 asoc->size_on_all_streams -= ctl->length;
5149 sctp_ucount_decr(asoc->cnt_on_all_streams);
5150 /* deliver it to at least the delivery-q */
5151 if (stcb->sctp_socket) {
5152 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5153 sctp_add_to_readq(stcb->sctp_ep, stcb,
5155 &stcb->sctp_socket->so_rcv,
5156 1, SCTP_READ_LOCK_HELD,
5157 SCTP_SO_NOT_LOCKED);
5160 /* It's a fragmented message */
5161 if (ctl->first_frag_seen) {
5162 /* Make it so this is next to
5163 * deliver, we restore later */
5164 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5165 need_reasm_check = 1;
5170 /* no more delivery now. */
5174 if (need_reasm_check) {
5177 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5178 if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5179 /* Restore the next to deliver unless we are ahead */
5180 strmin->last_sequence_delivered = tt;
5183 /* Left the front Partial one on */
5186 need_reasm_check = 0;
5189 * now we must deliver things in queue the normal way if any are
5192 tt = strmin->last_sequence_delivered + 1;
5193 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5194 if (tt == ctl->sinfo_ssn) {
5195 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5196 /* this is deliverable now */
5197 if (ctl->on_strm_q) {
5198 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5199 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5200 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5201 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5204 panic("strmin: %p ctl: %p unknown %d",
5205 strmin, ctl, ctl->on_strm_q);
5210 /* subtract pending on streams */
5211 asoc->size_on_all_streams -= ctl->length;
5212 sctp_ucount_decr(asoc->cnt_on_all_streams);
5213 /* deliver it to at least the delivery-q */
5214 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5215 if (stcb->sctp_socket) {
5216 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5217 sctp_add_to_readq(stcb->sctp_ep, stcb,
5219 &stcb->sctp_socket->so_rcv, 1,
5220 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5223 tt = strmin->last_sequence_delivered + 1;
5225 /* It's a fragmented message */
5226 if (ctl->first_frag_seen) {
5227 /* Make it so this is next to
5228 * deliver */
5229 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5230 need_reasm_check = 1;
5238 if (need_reasm_check) {
5239 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
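/*
 * Illustrative sketch (hypothetical, standalone) of the in-order kick in
 * the second loop above: once the cumulative point moves, ordered
 * messages are handed up only while their stream sequence number is
 * exactly the next expected one; delivery stops at the first gap.
 */
#if 0
static void
example_kick_ordered(uint32_t *next_ssn, const uint32_t *queued_ssn, int n)
{
	int i;

	for (i = 0; i < n; i++) {	/* queued_ssn[] assumed sorted */
		if (queued_ssn[i] != *next_ssn)
			break;		/* gap: stop delivering */
		/* hand message i to the read queue here */
		(*next_ssn)++;
	}
}
#endif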
5246 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5247 struct sctp_association *asoc,
5248 uint16_t stream, uint32_t seq, int ordered, int old, uint32_t cumtsn)
5250 struct sctp_queued_to_read *control;
5251 struct sctp_stream_in *strm;
5252 struct sctp_tmit_chunk *chk, *nchk;
5253 int cnt_removed = 0;
5256 * For now large messages held on the stream reasm that are complete
5257 * will be tossed too. We could in theory do more work to spin
5258 * through and stop after dumping one msg aka seeing the start of a
5259 * new msg at the head, and call the delivery function... to see if
5260 * it can be delivered... But for now we just dump everything on the
5261 * stream reasm queue.
5263 strm = &asoc->strmin[stream];
5264 control = sctp_find_reasm_entry(strm, (uint32_t) seq, ordered, old);
5265 if (control == NULL) {
5269 if (old && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5272 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5273 /* Purge hanging chunks */
5274 if (old && (ordered == 0)) {
5275 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, cumtsn)) {
5280 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5281 asoc->size_on_reasm_queue -= chk->send_size;
5282 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5284 sctp_m_freem(chk->data);
5287 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5289 if (!TAILQ_EMPTY(&control->reasm)) {
5290 /* This has to be old data, unordered */
5291 if (control->data) {
5292 sctp_m_freem(control->data);
5293 control->data = NULL;
5295 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5296 chk = TAILQ_FIRST(&control->reasm);
5297 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5298 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5299 sctp_add_chk_to_control(control, strm, stcb, asoc,
5300 chk, SCTP_READ_LOCK_HELD);
5302 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5305 if (control->on_strm_q == SCTP_ON_ORDERED) {
5306 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5307 control->on_strm_q = 0;
5308 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5309 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5310 control->on_strm_q = 0;
5312 } else if (control->on_strm_q) {
5313 panic("strm: %p ctl: %p unknown %d",
5314 strm, control, control->on_strm_q);
5317 control->on_strm_q = 0;
5318 if (control->on_read_q == 0) {
5319 sctp_free_remote_addr(control->whoFrom);
5320 if (control->data) {
5321 sctp_m_freem(control->data);
5322 control->data = NULL;
5324 sctp_free_a_readq(stcb, control);
5329 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5330 struct sctp_forward_tsn_chunk *fwd,
5331 int *abort_flag, struct mbuf *m, int offset)
5333 /* The pr-sctp fwd tsn */
5335 * here we will perform all the data receiver side steps for
5336 * processing FwdTSN, as required by the pr-sctp draft:
5338 * Assume we get FwdTSN(x):
5340 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5341 * + others we have 3) examine and update re-ordering queue on
5342 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5343 * report where we are.
5345 struct sctp_association *asoc;
5346 uint32_t new_cum_tsn, gap;
5347 unsigned int i, fwd_sz, m_size;
5349 struct sctp_stream_in *strm;
5350 struct sctp_queued_to_read *ctl, *sv;
5353 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5354 SCTPDBG(SCTP_DEBUG_INDATA1,
5355 "Bad size too small/big fwd-tsn\n");
5358 m_size = (stcb->asoc.mapping_array_size << 3);
5359 /*************************************************************/
5360 /* 1. Here we update local cumTSN and shift the bitmap array */
5361 /*************************************************************/
5362 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5364 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5365 /* Already got there ... */
5369 * now we know the new TSN is more advanced, let's find the actual
5372 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5373 asoc->cumulative_tsn = new_cum_tsn;
5374 if (gap >= m_size) {
5375 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5376 struct mbuf *op_err;
5377 char msg[SCTP_DIAG_INFO_LEN];
5380 * out of range (of single byte chunks in the rwnd I
5381 * give out). This must be an attacker.
5384 snprintf(msg, sizeof(msg),
5385 "New cum ack %8.8x too high, highest TSN %8.8x",
5386 new_cum_tsn, asoc->highest_tsn_inside_map);
5387 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5388 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5389 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5392 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5394 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5395 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5396 asoc->highest_tsn_inside_map = new_cum_tsn;
5398 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5399 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5401 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5402 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5405 SCTP_TCB_LOCK_ASSERT(stcb);
5406 for (i = 0; i <= gap; i++) {
5407 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5408 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5409 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5410 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5411 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
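/*
 * Illustrative sketch (standalone, hypothetical names) of the loop above:
 * every TSN from the mapping array base up to the new cumulative TSN is
 * marked present in the non-renegable map, one bit per TSN.
 */
#if 0
static void
example_mark_through_cum_tsn(uint8_t *nr_map, uint32_t base_tsn,
    uint32_t new_cum_tsn)
{
	uint32_t gap, i;

	gap = new_cum_tsn - base_tsn;	/* the real code uses wrap-safe macros */
	for (i = 0; i <= gap; i++)
		nr_map[i >> 3] |= (1 << (i & 0x07));	/* bit for base_tsn + i */
}
#endif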
5416 /*************************************************************/
5417 /* 2. Clear up re-assembly queue */
5418 /*************************************************************/
5420 /* This is now done as part of clearing up the stream/seq */
5421 if (asoc->idata_supported == 0) {
5424 /* Flush all the un-ordered data based on cum-tsn */
5425 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5426 for (sid = 0; sid < asoc->streamincnt; sid++) {
5427 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, 1, new_cum_tsn);
5429 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5431 /*******************************************************/
5432 /* 3. Update the PR-stream re-ordering queues and fix */
5433 /* delivery issues as needed. */
5434 /*******************************************************/
5435 fwd_sz -= sizeof(*fwd);
5438 unsigned int num_str;
5441 uint16_t ordered, flags;
5443 struct sctp_strseq *stseq, strseqbuf;
5444 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5446 offset += sizeof(*fwd);
5448 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5449 if (asoc->idata_supported) {
5450 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5453 num_str = fwd_sz / sizeof(struct sctp_strseq);
5456 for (i = 0; i < num_str; i++) {
5457 if (asoc->idata_supported) {
5458 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5459 sizeof(struct sctp_strseq_mid),
5460 (uint8_t *) & strseqbuf_m);
5461 offset += sizeof(struct sctp_strseq_mid);
5462 if (stseq_m == NULL) {
5465 stream = ntohs(stseq_m->stream);
5466 sequence = ntohl(stseq_m->msg_id);
5467 flags = ntohs(stseq_m->flags);
5468 if (flags & PR_SCTP_UNORDERED_FLAG) {
5474 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5475 sizeof(struct sctp_strseq),
5476 (uint8_t *) & strseqbuf);
5477 offset += sizeof(struct sctp_strseq);
5478 if (stseq == NULL) {
5481 stream = ntohs(stseq->stream);
5482 sequence = (uint32_t) ntohs(stseq->sequence);
5490 * Ok we now look for the stream/seq on the read
5491 * queue where it's not all delivered. If we find it
5492 * we transmute the read entry into a PDI_ABORTED.
5494 if (stream >= asoc->streamincnt) {
5495 /* screwed up streams, stop! */
5498 if ((asoc->str_of_pdapi == stream) &&
5499 (asoc->ssn_of_pdapi == sequence)) {
5501 * If this is the one we were partially
5502 * delivering now then we no longer are.
5503 * Note this will change with the reassembly
5504 * re-write.
5506 asoc->fragmented_delivery_inprogress = 0;
5508 strm = &asoc->strmin[stream];
5509 if (asoc->idata_supported == 0) {
5512 for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(1, sequence, strm_at); strm_at++) {
5513 sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
5518 for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(0, sequence, strm_at); strm_at++) {
5519 sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
5522 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5523 if ((ctl->sinfo_stream == stream) &&
5524 (ctl->sinfo_ssn == sequence)) {
5525 str_seq = (stream << 16) | (0x0000ffff & sequence);
5526 ctl->pdapi_aborted = 1;
5527 sv = stcb->asoc.control_pdapi;
5529 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5530 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5531 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5532 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5534 } else if (ctl->on_strm_q) {
5535 panic("strm: %p ctl: %p unknown %d",
5536 strm, ctl, ctl->on_strm_q);
5540 stcb->asoc.control_pdapi = ctl;
5541 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5543 SCTP_PARTIAL_DELIVERY_ABORTED,
5545 SCTP_SO_NOT_LOCKED);
5546 stcb->asoc.control_pdapi = sv;
5548 } else if ((ctl->sinfo_stream == stream) &&
5549 SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5550 /* We are past our victim SSN */
5554 if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5555 /* Update the sequence number */
5556 strm->last_sequence_delivered = sequence;
5558 /* now kick the stream the new way */
5559 /* sa_ignore NO_NULL_CHK */
5560 sctp_kick_prsctp_reorder_queue(stcb, strm);
5562 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5565 * Now slide things forward.
5567 sctp_slide_mapping_arrays(stcb);