2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
54 * NOTES: On the outbound side of things I need to check the sack timer to
55 * see if I should generate a sack into the chunk queue (if I have data to
56 * send that is and will be sending it .. for bundling.
58 * The callback in sctp_usrreq.c will get called when the socket is read from.
59 * This will cause sctp_service_queues() to get called on the top entry in
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64 struct sctp_stream_in *strm,
65 struct sctp_tcb *stcb,
66 struct sctp_association *asoc,
67 struct sctp_tmit_chunk *chk, int lock_held);
/*
 * Recompute the association's advertised receive window and cache it in
 * asoc->my_rwnd via sctp_calc_rwnd().
 * NOTE(review): embedded line numbers jump 71 -> 73; the function's braces
 * (and any other body lines) are not visible in this chunk. Code left as-is.
 */
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
/*
 * Compute the receive window (rwnd) to advertise for this association.
 * Visible logic: grant the full socket-buffer limit when nothing is queued,
 * otherwise start from the actual socket-buffer space and subtract the data
 * (plus per-chunk MSIZE overhead) still held on the reassembly and stream
 * queues, and the rwnd control overhead.
 * NOTE(review): line numbers are discontinuous (77, 79-82, 87, 89-90, 96-97,
 * 100, 102-103, 108-111, 114, 117, 119+ missing) -- return type, braces,
 * the NULL-socket early return, the else arm, and the final clamp/return are
 * not visible. Code left byte-identical.
 */
76 /* Calculate what the rwnd would be */
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
83 * This is really set wrong with respect to a 1-2-m socket. Since
84 * the sb_cc is the count that everyone as put up. When we re-write
85 * sctp_soreceive then we will fix this so that ONLY this
86 * associations data is taken into account.
88 if (stcb->sctp_socket == NULL) {
91 if (stcb->asoc.sb_cc == 0 &&
92 asoc->size_on_reasm_queue == 0 &&
93 asoc->size_on_all_streams == 0) {
94 /* Full rwnd granted */
95 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 /* get actual space */
99 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
101 * take out what has NOT been put on socket queue and we yet hold
104 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 asoc->cnt_on_reasm_queue * MSIZE));
106 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 asoc->cnt_on_all_streams * MSIZE));
112 /* what is the overhead of all these rwnd's */
113 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
115 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 * even it is 0. SWS engaged
118 if (calc < stcb->asoc.my_rwnd_control_len) {
/*
 * Allocate and initialize an sctp_queued_to_read entry describing one
 * incoming message: stream id/sequence, flags (shifted into sinfo_flags'
 * high byte), ppid, context, TSN (also used as initial cumtsn), assoc id,
 * sentinel FSN values (0xffffffff = "none included yet"), an empty reasm
 * queue, the source net (whose refcount is bumped), the data mbuf chain,
 * and the owning stcb/port. Returns NULL if allocation fails.
 * NOTE(review): lines 135-136, 138, 141-142, 159 of the original are
 * missing (parameter tail incl. the `dm` mbuf argument, braces, the NULL
 * early-return body). Code left byte-identical.
 */
127 * Build out our readq entry based on the incoming packet.
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131 struct sctp_nets *net,
132 uint32_t tsn, uint32_t ppid,
133 uint32_t context, uint16_t stream_no,
134 uint32_t stream_seq, uint8_t flags,
137 struct sctp_queued_to_read *read_queue_e = NULL;
139 sctp_alloc_a_readq(stcb, read_queue_e);
140 if (read_queue_e == NULL) {
143 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 read_queue_e->sinfo_stream = stream_no;
145 read_queue_e->sinfo_ssn = stream_seq;
146 read_queue_e->sinfo_flags = (flags << 8);
147 read_queue_e->sinfo_ppid = ppid;
148 read_queue_e->sinfo_context = context;
149 read_queue_e->sinfo_tsn = tsn;
150 read_queue_e->sinfo_cumtsn = tsn;
151 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
152 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 TAILQ_INIT(&read_queue_e->reasm);
154 read_queue_e->whoFrom = net;
155 atomic_add_int(&net->ref_count, 1);
156 read_queue_e->data = dm;
157 read_queue_e->stcb = stcb;
158 read_queue_e->port_from = stcb->rport;
160 return (read_queue_e);
/*
 * Build an mbuf of ancillary data (cmsgs) for a received message, based on
 * which notification features the endpoint has enabled:
 *  - SCTP_RCVINFO cmsg when SCTP_PCB_FLAGS_RECVRCVINFO is on,
 *  - SCTP_NXTINFO cmsg when SCTP_PCB_FLAGS_RECVNXTINFO is on and the
 *    extended rcvinfo says a next message is available,
 *  - legacy SCTP_SNDRCV / SCTP_EXTRCV cmsg when RECVDATAIOEVNT is on
 *    (EXT_RCVINFO selects the extended layout).
 * The required length is summed first, one mbuf is allocated with
 * sctp_get_mbuf_for_msg(), and each cmsg is appended in turn, advancing
 * `cmh` by CMSG_SPACE() and growing SCTP_BUF_LEN(ret).
 * Returns NULL when the user wants no ancillary data (and, presumably, on
 * allocation failure -- that branch is not visible here).
 * NOTE(review): many original lines are missing (165, 170-175, 180-182,
 * 185, 189, 191-193, 196, 198-199, 201-205, 207-210, 212, 215, 218-219,
 * 235-236, 245, 248, 251, 257, 261, 266, 269, 271+): the return type,
 * local declarations of `len`/`ret`/`cmh`, braces, memset of padding, the
 * SNDRCV memcpy, and the final return. Code left byte-identical.
 */
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
166 struct sctp_extrcvinfo *seinfo;
167 struct sctp_sndrcvinfo *outinfo;
168 struct sctp_rcvinfo *rcvinfo;
169 struct sctp_nxtinfo *nxtinfo;
176 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 /* user does not want any ancillary data */
183 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
186 seinfo = (struct sctp_extrcvinfo *)sinfo;
187 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
190 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
194 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
197 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
206 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 SCTP_BUF_LEN(ret) = 0;
213 /* We need a CMSG header followed by the struct */
214 cmh = mtod(ret, struct cmsghdr *);
216 * Make sure that there is no un-initialized padding between the
217 * cmsg header and cmsg data and after the cmsg data.
220 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 cmh->cmsg_level = IPPROTO_SCTP;
222 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 cmh->cmsg_type = SCTP_RCVINFO;
224 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 rcvinfo->rcv_context = sinfo->sinfo_context;
232 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
237 cmh->cmsg_level = IPPROTO_SCTP;
238 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 cmh->cmsg_type = SCTP_NXTINFO;
240 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 nxtinfo->nxt_flags = 0;
243 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 nxtinfo->nxt_flags |= SCTP_UNORDERED;
246 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 nxtinfo->nxt_flags |= SCTP_COMPLETE;
252 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
262 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 cmh->cmsg_type = SCTP_EXTRCV;
264 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 cmh->cmsg_type = SCTP_SNDRCV;
270 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a TSN from the revokable mapping array to the non-revokable
 * (nr) mapping array once it has been delivered and may no longer be
 * renege'd. Visible logic:
 *  - no-op when sctp_do_drain is disabled or the TSN is at/behind the
 *    cumulative ack (nothing to move);
 *  - sanity-check that the TSN is present in at least one of the two maps
 *    (panic under INVARIANTS, otherwise print the maps -- the #ifdef lines
 *    themselves are not visible here);
 *  - set the bit in nr_mapping_array, clear it in mapping_array, and
 *    maintain highest_tsn_inside_nr_map / highest_tsn_inside_map, walking
 *    backwards to find the new highest when the moved TSN was the top.
 * NOTE(review): lines 279, 281, 283, 285-286, 289, 292-294, 299, 301,
 * 304-306, 308, 312, 319-323, 325+ are missing (braces, returns,
 * #ifdef INVARIANTS markers, loop closers). Code left byte-identical.
 */
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
280 uint32_t gap, i, cumackp1;
282 int in_r = 0, in_nr = 0;
284 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 cumackp1 = asoc->cumulative_tsn + 1;
288 if (SCTP_TSN_GT(cumackp1, tsn)) {
290 * this tsn is behind the cum ack and thus we don't need to
291 * worry about it being moved from one to the other.
295 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 if ((in_r == 0) && (in_nr == 0)) {
300 panic("Things are really messed up now");
302 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 sctp_print_mapping_array(asoc);
307 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 asoc->highest_tsn_inside_nr_map = tsn;
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
324 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert a reassembly control entry into the correct per-stream inqueue
 * (unordered queue or ordered queue -- the selection of `q` for the
 * ordered case is among the missing lines), sorted by msg_id.
 * Visible behavior:
 *  - pre-I-DATA (idata_supported == 0): the unordered queue holds at most
 *    one active entry, appended at the tail;
 *  - a chunk carrying SCTP_DATA_NOT_FRAG is a complete message, so
 *    end_added/last_frag_seen/first_frag_seen are all set at once;
 *  - otherwise walk the queue and insert before the first entry with a
 *    larger msg_id; a duplicate msg_id is an error (the comment says
 *    "return -1 to abort", the return itself is not visible);
 *  - on_strm_q is tagged SCTP_ON_UNORDERED / SCTP_ON_ORDERED accordingly.
 * NOTE(review): lines 333, 337, 340, 344, 346-349, 352-356, 359, 361, 363,
 * 365, 367-369, 372, 375, 377, 379, 381-382, 384, 387-389, 391, 393-394,
 * 398, 401, 403, 405+ are missing (braces, the ordered-queue selection,
 * returns, `unordered` tests guarding the on_strm_q assignments).
 * Code left byte-identical.
 */
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t bits, unordered;
338 bits = (control->sinfo_flags >> 8);
339 unordered = bits & SCTP_DATA_UNORDERED;
341 q = &strm->uno_inqueue;
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
345 * Only one stream can be here in old style
350 TAILQ_INSERT_TAIL(q, control, next_instrm);
351 control->on_strm_q = SCTP_ON_UNORDERED;
357 if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
360 if (TAILQ_EMPTY(q)) {
362 TAILQ_INSERT_HEAD(q, control, next_instrm);
364 control->on_strm_q = SCTP_ON_UNORDERED;
366 control->on_strm_q = SCTP_ON_ORDERED;
370 TAILQ_FOREACH(at, q, next_instrm) {
371 if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
373 * one in queue is bigger than the new one,
374 * insert before this one
376 TAILQ_INSERT_BEFORE(at, control, next_instrm);
378 control->on_strm_q = SCTP_ON_UNORDERED;
380 control->on_strm_q = SCTP_ON_ORDERED;
383 } else if (at->msg_id == control->msg_id) {
385 * Gak, He sent me a duplicate msg id
386 * number?? return -1 to abort.
390 if (TAILQ_NEXT(at, next_instrm) == NULL) {
392 * We are at the end, insert it
395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 sctp_log_strm_del(control, at,
397 SCTP_STR_LOG_FROM_INSERT_TL);
399 TAILQ_INSERT_AFTER(q,
400 at, control, next_instrm);
402 control->on_strm_q = SCTP_ON_UNORDERED;
404 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association due to a reassembly protocol violation.
 * Formats a diagnostic string (I-DATA vs. legacy DATA layouts differ),
 * builds a PROTOCOL_VIOLATION error cause, frees the offending chunk's
 * data and the chunk itself, records the abort location code, and calls
 * sctp_abort_an_association(). The `opspot` argument supplies the
 * SCTP_LOC_* code recorded in last_abort_code; line 445 shows SCTP_LOC_1
 * but the visible callers pass other locations -- presumably the real
 * statement uses `opspot` and line 445 was partially captured; confirm
 * against the upstream file.
 * NOTE(review): lines 419, 421-422, 426, 431, 434, 440, 443, 447-448
 * are missing (`struct mbuf *oper;`, braces, `chk->data = NULL;`,
 * `*abort_flag = 1;`). Code left byte-identical.
 */
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416 struct sctp_queued_to_read *control,
417 struct sctp_tmit_chunk *chk,
418 int *abort_flag, int opspot)
420 char msg[SCTP_DIAG_INFO_LEN];
423 if (stcb->asoc.idata_supported) {
424 snprintf(msg, sizeof(msg),
425 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
427 control->fsn_included,
428 chk->rec.data.TSN_seq,
429 chk->rec.data.stream_number,
430 chk->rec.data.fsn_num, chk->rec.data.stream_seq);
432 snprintf(msg, sizeof(msg),
433 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
435 control->fsn_included,
436 chk->rec.data.TSN_seq,
437 chk->rec.data.stream_number,
438 chk->rec.data.fsn_num,
439 (uint16_t) chk->rec.data.stream_seq);
441 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
442 sctp_m_freem(chk->data);
444 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
445 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
446 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Tear down a control entry that could not be placed on a stream queue:
 * free every chunk still on its reassembly list (data mbufs first, then
 * the chunk descriptors) and release the readq entry itself.
 * NOTE(review): lines 452-453, 455, 457, 460, 462, 464, 466+ are missing
 * (braces, `chk->data = NULL;`). Code left byte-identical.
 */
451 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
454 * The control could not be placed and must be cleaned.
456 struct sctp_tmit_chunk *chk, *nchk;
458 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
459 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
461 sctp_m_freem(chk->data);
463 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
465 sctp_free_a_readq(stcb, control);
/*
 * Deliver an ordered message directly to the socket buffer if it is the
 * next SSN expected on its stream, draining any now-in-order successors
 * from the stream inqueue as well; otherwise place it in SSN order on the
 * stream queue via sctp_place_control_in_stream(). Aborts the association
 * on an SSN behind last_sequence_delivered or on a duplicate msg_id.
 * Apple/lock-testing builds take the socket lock around the delivery path.
 * NOTE(review): a large number of original lines are missing throughout
 * (braces, local declarations of `queue_needed`/`nxt_todel`/`op_err`/`so`,
 * returns, the `queue_needed` bookkeeping near original lines 523-531 and
 * 598-610, loop/if closers). The control flow as shown is therefore
 * incomplete; code left byte-identical. Claims above are limited to what
 * the visible calls demonstrate.
 */
469 * Queue the chunk either right into the socket buffer if it is the next one
470 * to go OR put it in the correct place in the delivery queue. If we do
471 * append to the so_buf, keep doing so until we are out of order as
472 * long as the control's entered are non-fragmented.
475 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
476 struct sctp_stream_in *strm,
477 struct sctp_association *asoc,
478 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
481 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
482 * all the data in one stream this could happen quite rapidly. One
483 * could use the TSN to keep track of things, but this scheme breaks
484 * down in the other type of stream usage that could occur. Send a
485 * single msg to stream 0, send 4Billion messages to stream 1, now
486 * send a message to stream 0. You have a situation where the TSN
487 * has wrapped but not in the stream. Is this worth worrying about
488 * or should we just change our queue sort at the bottom to be by
491 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
492 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
493 * assignment this could happen... and I don't see how this would be
494 * a violation. So for now I am undecided an will leave the sort by
495 * SSN alone. Maybe a hybred approach is the answer
498 struct sctp_queued_to_read *at;
502 char msg[SCTP_DIAG_INFO_LEN];
504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
505 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
507 if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
508 /* The incoming sseq is behind where we last delivered? */
509 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
510 control->sinfo_ssn, strm->last_sequence_delivered);
513 * throw it in the stream so it gets cleaned up in
514 * association destruction
516 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
517 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
518 strm->last_sequence_delivered, control->sinfo_tsn,
519 control->sinfo_stream, control->sinfo_ssn);
520 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
521 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
522 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
527 if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
531 asoc->size_on_all_streams += control->length;
532 sctp_ucount_incr(asoc->cnt_on_all_streams);
533 nxt_todel = strm->last_sequence_delivered + 1;
534 if (nxt_todel == control->sinfo_ssn) {
535 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
538 so = SCTP_INP_SO(stcb->sctp_ep);
539 atomic_add_int(&stcb->asoc.refcnt, 1);
540 SCTP_TCB_UNLOCK(stcb);
541 SCTP_SOCKET_LOCK(so, 1);
543 atomic_subtract_int(&stcb->asoc.refcnt, 1);
544 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
545 SCTP_SOCKET_UNLOCK(so, 1);
549 /* can be delivered right away? */
550 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
551 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
553 /* EY it wont be queued if it could be delivered directly */
555 asoc->size_on_all_streams -= control->length;
556 sctp_ucount_decr(asoc->cnt_on_all_streams);
557 strm->last_sequence_delivered++;
558 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
559 sctp_add_to_readq(stcb->sctp_ep, stcb,
561 &stcb->sctp_socket->so_rcv, 1,
562 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
563 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
565 nxt_todel = strm->last_sequence_delivered + 1;
566 if ((nxt_todel == control->sinfo_ssn) &&
567 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
568 asoc->size_on_all_streams -= control->length;
569 sctp_ucount_decr(asoc->cnt_on_all_streams);
570 if (control->on_strm_q == SCTP_ON_ORDERED) {
571 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
574 panic("Huh control: %p is on_strm_q: %d",
575 control, control->on_strm_q);
578 control->on_strm_q = 0;
579 strm->last_sequence_delivered++;
581 * We ignore the return of deliver_data here
582 * since we always can hold the chunk on the
583 * d-queue. And we have a finite number that
584 * can be delivered from the strq.
586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
587 sctp_log_strm_del(control, NULL,
588 SCTP_STR_LOG_FROM_IMMED_DEL);
590 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
591 sctp_add_to_readq(stcb->sctp_ep, stcb,
593 &stcb->sctp_socket->so_rcv, 1,
594 SCTP_READ_LOCK_NOT_HELD,
597 } else if (nxt_todel == control->sinfo_ssn) {
602 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
603 SCTP_SOCKET_UNLOCK(so, 1);
608 * Ok, we did not deliver this guy, find the correct place
609 * to put it on the queue.
611 if (sctp_place_control_in_stream(strm, asoc, control)) {
612 snprintf(msg, sizeof(msg),
613 "Queue to str msg_id: %u duplicate",
615 sctp_clean_up_control(stcb, control);
616 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
617 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
618 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk control->data, dropping zero-length mbufs, recomputing
 * control->length (via atomic adds per mbuf), charging the socket buffer
 * when the control is already on the read queue, and leaving
 * control->tail_mbuf pointing at the last mbuf for O(1) appends.
 * NOTE(review): lines 627, 630, 633-635, 638-639, 641-642, 645-646,
 * 648-651, 654, 657, 659, 661-662, 664+ are missing (braces, the
 * `m = control->data` initialization, the length reset, `prev = m`
 * advance, returns). Code left byte-identical.
 */
626 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
628 struct mbuf *m, *prev = NULL;
629 struct sctp_tcb *stcb;
631 stcb = control->stcb;
632 control->held_length = 0;
636 if (SCTP_BUF_LEN(m) == 0) {
637 /* Skip mbufs with NO length */
640 control->data = sctp_m_free(m);
643 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
644 m = SCTP_BUF_NEXT(prev);
647 control->tail_mbuf = prev;
652 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
653 if (control->on_read_q) {
655 * On read queue so we must increment the SB stuff,
656 * we assume caller has done any locks of SB.
658 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
660 m = SCTP_BUF_NEXT(m);
663 control->tail_mbuf = prev;
/*
 * Append mbuf chain `m` to control->data using the cached tail pointer,
 * skipping zero-length mbufs, accounting each appended mbuf into
 * control->length and (when on_read_q) into the socket receive buffer,
 * and updating control->tail_mbuf. When no tail pointer exists yet the
 * work is delegated to sctp_setup_tail_pointer(). Panics ("Control
 * broken") under a condition whose guard line is not visible --
 * presumably INVARIANTS with a NULL stcb; confirm upstream.
 * NOTE(review): lines 669, 672, 674-675, 677-680, 682-683, 685-686, 688,
 * 691-692, 695, 698-699, 701-704, 706, 709, 711, 714-715, 717+ are
 * missing (braces, `if (control->data == NULL)` style guards, returns,
 * `prev = m` advance). Code left byte-identical.
 */
668 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
670 struct mbuf *prev = NULL;
671 struct sctp_tcb *stcb;
673 stcb = control->stcb;
676 panic("Control broken");
681 if (control->tail_mbuf == NULL) {
684 sctp_setup_tail_pointer(control);
687 control->tail_mbuf->m_next = m;
689 if (SCTP_BUF_LEN(m) == 0) {
690 /* Skip mbufs with NO length */
693 control->tail_mbuf->m_next = sctp_m_free(m);
694 m = control->tail_mbuf->m_next;
696 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
697 m = SCTP_BUF_NEXT(prev);
700 control->tail_mbuf = prev;
705 if (control->on_read_q) {
707 * On read queue so we must increment the SB stuff,
708 * we assume caller has done any locks of SB.
710 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
712 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
713 m = SCTP_BUF_NEXT(m);
716 control->tail_mbuf = prev;
/*
 * Initialize readq entry `nc` as a continuation of `control`: copy the
 * stream/sequence identifiers, flags, ppid, context, TSNs, assoc id,
 * msg_id, top_fsn, source net (taking a reference) and owning stcb/port,
 * reset fsn_included to the 0xffffffff "nothing included" sentinel, and
 * start with an empty reassembly queue. Used when splitting leftover
 * fragments off a completed un-ordered message.
 * NOTE(review): lines 722, 740-741 (braces) are not visible; the field
 * copies themselves appear contiguous. Code left byte-identical.
 */
721 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
723 memset(nc, 0, sizeof(struct sctp_queued_to_read));
724 nc->sinfo_stream = control->sinfo_stream;
725 nc->sinfo_ssn = control->sinfo_ssn;
726 TAILQ_INIT(&nc->reasm);
727 nc->top_fsn = control->top_fsn;
728 nc->msg_id = control->msg_id;
729 nc->sinfo_flags = control->sinfo_flags;
730 nc->sinfo_ppid = control->sinfo_ppid;
731 nc->sinfo_context = control->sinfo_context;
732 nc->fsn_included = 0xffffffff;
733 nc->sinfo_tsn = control->sinfo_tsn;
734 nc->sinfo_cumtsn = control->sinfo_cumtsn;
735 nc->sinfo_assoc_id = control->sinfo_assoc_id;
736 nc->whoFrom = control->whoFrom;
737 atomic_add_int(&nc->whoFrom->ref_count, 1);
738 nc->stcb = control->stcb;
739 nc->port_from = control->port_from;
/*
 * Reset a control entry for reuse at a new starting TSN: record `tsn` as
 * the included FSN and, if the entry is sitting on the endpoint's read
 * queue, unlink it and clear on_read_q.
 * NOTE(review): lines 745, 748, 750-751, 754-755 are missing (braces and
 * any lock assertions around the read-queue removal). Code left
 * byte-identical.
 */
743 sctp_reset_a_control(struct sctp_queued_to_read *control,
744 struct sctp_inpcb *inp, uint32_t tsn)
746 control->fsn_included = tsn;
747 if (control->on_read_q) {
749 * We have to purge it from there, hopefully this will work
752 TAILQ_REMOVE(&inp->read_queue, control, next);
753 control->on_read_q = 0;
/*
 * Service one control entry on the "old style" (pre-I-DATA) un-ordered
 * queue, where all fragments share msg_id 0. Visible behavior:
 *  - nothing to do until the first fragment has been seen;
 *  - collapse consecutive fragments (fsn == fsn_included + 1) from the
 *    reassembly list onto the control via sctp_add_chk_to_control();
 *  - when the message completes (end_added) but fragments of a FOLLOWING
 *    message remain queued, allocate a fresh control `nc`, seed it from
 *    the old one (sctp_build_readq_entry_from_ctl), move the leftover
 *    chunks across, swap it onto the un-ordered stream queue, push the
 *    finished control to the read queue and wake the reader; loop onto
 *    `nc` if it too already has a first fragment plus queued successors;
 *  - otherwise, if the partial message has exceeded pd_point and no
 *    PD-API is in progress, start partial delivery to the read queue.
 * Return value signals the caller whether to stop scanning this queue
 * (per the header comment: "If you return one, no other control entries
 * on the un-ordered queue will be looked at").
 * NOTE(review): dozens of original lines are missing (braces, `uint32_t
 * fsn;`, the `cnt_added` counter visible upstream, returns, goto/loop
 * closers), so the exact control flow cannot be confirmed from this view.
 * Code left byte-identical.
 */
758 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
759 struct sctp_association *asoc,
760 struct sctp_stream_in *strm,
761 struct sctp_queued_to_read *control,
763 int inp_read_lock_held)
766 * Special handling for the old un-ordered data chunk. All the
767 * chunks/TSN's go to msg_id 0. So we have to do the old style
768 * watching to see if we have it all. If you return one, no other
769 * control entries on the un-ordered queue will be looked at. In
770 * theory there should be no others entries in reality, unless the
771 * guy is sending both unordered NDATA and unordered DATA...
773 struct sctp_tmit_chunk *chk, *lchk, *tchk;
775 struct sctp_queued_to_read *nc;
778 if (control->first_frag_seen == 0) {
779 /* Nothing we can do, we have not seen the first piece yet */
782 /* Collapse any we can */
785 fsn = control->fsn_included + 1;
786 /* Now what can we add? */
787 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
788 if (chk->rec.data.fsn_num == fsn) {
790 sctp_alloc_a_readq(stcb, nc);
794 memset(nc, 0, sizeof(struct sctp_queued_to_read));
795 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
796 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
800 if (control->end_added) {
802 if (!TAILQ_EMPTY(&control->reasm)) {
804 * Ok we have to move anything left
805 * on the control queue to a new
808 sctp_build_readq_entry_from_ctl(nc, control);
809 tchk = TAILQ_FIRST(&control->reasm);
810 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
811 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
812 nc->first_frag_seen = 1;
813 nc->fsn_included = tchk->rec.data.fsn_num;
814 nc->data = tchk->data;
815 nc->sinfo_ppid = tchk->rec.data.payloadtype;
816 nc->sinfo_tsn = tchk->rec.data.TSN_seq;
817 sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
819 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
820 sctp_setup_tail_pointer(nc);
821 tchk = TAILQ_FIRST(&control->reasm);
823 /* Spin the rest onto the queue */
825 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
826 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
827 tchk = TAILQ_FIRST(&control->reasm);
830 * Now lets add it to the queue
831 * after removing control
833 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
834 nc->on_strm_q = SCTP_ON_UNORDERED;
835 if (control->on_strm_q) {
836 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
837 control->on_strm_q = 0;
840 if (control->pdapi_started) {
841 strm->pd_api_started = 0;
842 control->pdapi_started = 0;
844 if (control->on_strm_q) {
845 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
846 control->on_strm_q = 0;
847 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
849 if (control->on_read_q == 0) {
850 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
851 &stcb->sctp_socket->so_rcv, control->end_added,
852 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
854 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
855 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
857 * Switch to the new guy and
863 if (nc->on_strm_q == 0) {
864 sctp_free_a_readq(stcb, nc);
869 sctp_free_a_readq(stcb, nc);
876 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
877 strm->pd_api_started = 1;
878 control->pdapi_started = 1;
879 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
880 &stcb->sctp_socket->so_rcv, control->end_added,
881 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
882 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Place one old-style un-ordered fragment `chk` into `control`, keeping
 * the reassembly list sorted by FSN. Visible behavior:
 *  - a FIRST_FRAG chunk when a first has already been seen starts a new
 *    message iff its FSN is beyond the current fsn_included; an equal FSN
 *    or an in-progress PD-API is unrecoverable and aborts the association
 *    (SCTP_LOC_4); a SMALLER first requires swapping the new chunk's
 *    data/FSN/TSN/PPID with the control's current head (the swap of
 *    data, send_size, fsn, tsn and ppid is visible; the tail-pointer
 *    fixups around line 969-971 are partly missing);
 *  - the first FIRST_FRAG simply seeds the control (first_frag_seen,
 *    top_fsn/fsn_included, sinfo_tsn/ppid, data) and is marked
 *    non-revokable;
 *  - non-first fragments are inserted before the first queued chunk with
 *    a larger FSN, with reasm-queue size/count accounting; a duplicate
 *    FSN aborts (SCTP_LOC_5); otherwise the chunk is appended at the
 *    tail and becomes the new top_fsn.
 * NOTE(review): many lines are missing (braces, `int inserted`/`tmp`/
 * `tdata` declarations are partially visible, returns, the trailing
 * delivery-check call). Code left byte-identical.
 */
890 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
891 struct sctp_association *asoc,
892 struct sctp_queued_to_read *control,
893 struct sctp_tmit_chunk *chk,
896 struct sctp_tmit_chunk *at;
900 * Here we need to place the chunk into the control structure sorted
901 * in the correct order.
903 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
904 /* Its the very first one. */
905 SCTPDBG(SCTP_DEBUG_XXX,
906 "chunk is a first fsn: %u becomes fsn_included\n",
907 chk->rec.data.fsn_num);
908 if (control->first_frag_seen) {
910 * In old un-ordered we can reassembly on one
911 * control multiple messages. As long as the next
912 * FIRST is greater then the old first (TSN i.e. FSN
918 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
920 * Easy way the start of a new guy beyond
925 if ((chk->rec.data.fsn_num == control->fsn_included) ||
926 (control->pdapi_started)) {
928 * Ok this should not happen, if it does we
929 * started the pd-api on the higher TSN
930 * (since the equals part is a TSN failure
933 * We are completly hosed in that case since I
934 * have no way to recover. This really will
935 * only happen if we can get more TSN's
936 * higher before the pd-api-point.
938 sctp_abort_in_reasm(stcb, control, chk,
940 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
945 * Ok we have two firsts and the one we just got is
946 * smaller than the one we previously placed.. yuck!
947 * We must swap them out.
950 tdata = control->data;
951 control->data = chk->data;
953 /* Save the lengths */
954 chk->send_size = control->length;
955 /* Recompute length of control and tail pointer */
956 sctp_setup_tail_pointer(control);
957 /* Fix the FSN included */
958 tmp = control->fsn_included;
959 control->fsn_included = chk->rec.data.fsn_num;
960 chk->rec.data.fsn_num = tmp;
961 /* Fix the TSN included */
962 tmp = control->sinfo_tsn;
963 control->sinfo_tsn = chk->rec.data.TSN_seq;
964 chk->rec.data.TSN_seq = tmp;
965 /* Fix the PPID included */
966 tmp = control->sinfo_ppid;
967 control->sinfo_ppid = chk->rec.data.payloadtype;
968 chk->rec.data.payloadtype = tmp;
969 /* Fix tail pointer */
972 control->first_frag_seen = 1;
973 control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
974 control->sinfo_tsn = chk->rec.data.TSN_seq;
975 control->sinfo_ppid = chk->rec.data.payloadtype;
976 control->data = chk->data;
977 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
979 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
980 sctp_setup_tail_pointer(control);
985 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
986 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
988 * This one in queue is bigger than the new one,
989 * insert the new one before at.
991 asoc->size_on_reasm_queue += chk->send_size;
992 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
994 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
996 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
998 * They sent a duplicate fsn number. This really
999 * should not happen since the FSN is a TSN and it
1000 * should have been dropped earlier.
1002 sctp_abort_in_reasm(stcb, control, chk,
1004 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1008 if (inserted == 0) {
1009 /* Its at the end */
1010 asoc->size_on_reasm_queue += chk->send_size;
1011 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1012 control->top_fsn = chk->rec.data.fsn_num;
1013 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * Scan one stream's reassembly queues and move anything deliverable onto
 * the endpoint read queue. Visible behavior:
 *  - compute pd_point: min(rcv-buffer limit >> SCTP_PARTIAL_DELIVERY_SHIFT,
 *    endpoint partial_delivery_point) when a socket exists, else the
 *    endpoint value alone;
 *  - un-ordered queue first: pre-I-DATA entries are delegated to
 *    sctp_handle_old_unordered_data(); otherwise completed entries
 *    (end_added) are unlinked (with an INVARIANTS check that they are
 *    tagged SCTP_ON_UNORDERED) and queued to the read queue, and an
 *    incomplete entry of at least pd_point bytes starts partial delivery;
 *  - ordered queue: first finish off a control whose SSN equals
 *    last_sequence_delivered (a partial delivery completing), then
 *    deliver in-sequence controls (SSN == last_sequence_delivered + 1
 *    with first_frag_seen), marking singletons non-revokable, bumping
 *    last_sequence_delivered, and falling back to PD-API for large
 *    incomplete heads. No more than one PD-API per stream at a time
 *    (pd_api_started guards every entry point).
 * NOTE(review): a large number of lines are missing (braces, `uint32_t
 * pd_point;`, `int done;`, returns, loop constructs around lines
 * 1039-1040, 1145, 1198-1210, #ifdef INVARIANTS markers), so loop
 * boundaries and early exits cannot be confirmed from this view.
 * Code left byte-identical.
 */
1018 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1019 struct sctp_stream_in *strm, int inp_read_lock_held)
1022 * Given a stream, strm, see if any of the SSN's on it that are
1023 * fragmented are ready to deliver. If so go ahead and place them on
1024 * the read queue. In so placing if we have hit the end, then we
1025 * need to remove them from the stream's queue.
1027 struct sctp_queued_to_read *control, *nctl = NULL;
1028 uint32_t next_to_del;
1032 if (stcb->sctp_socket) {
1033 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1034 stcb->sctp_ep->partial_delivery_point);
1036 pd_point = stcb->sctp_ep->partial_delivery_point;
1038 control = TAILQ_FIRST(&strm->uno_inqueue);
1041 (asoc->idata_supported == 0)) {
1042 /* Special handling needed for "old" data format */
1043 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1047 if (strm->pd_api_started) {
1048 /* Can't add more */
1052 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1053 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1054 nctl = TAILQ_NEXT(control, next_instrm);
1055 if (control->end_added) {
1056 /* We just put the last bit on */
1057 if (control->on_strm_q) {
1059 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1060 panic("Huh control: %p on_q: %d -- not unordered?",
1061 control, control->on_strm_q);
1064 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1065 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1066 control->on_strm_q = 0;
1068 if (control->on_read_q == 0) {
1069 sctp_add_to_readq(stcb->sctp_ep, stcb,
1071 &stcb->sctp_socket->so_rcv, control->end_added,
1072 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1075 /* Can we do a PD-API for this un-ordered guy? */
1076 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1077 strm->pd_api_started = 1;
1078 control->pdapi_started = 1;
1079 sctp_add_to_readq(stcb->sctp_ep, stcb,
1081 &stcb->sctp_socket->so_rcv, control->end_added,
1082 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1090 control = TAILQ_FIRST(&strm->inqueue);
1091 if (strm->pd_api_started) {
1092 /* Can't add more */
1095 if (control == NULL) {
1098 if (strm->last_sequence_delivered == control->sinfo_ssn) {
1100 * Ok the guy at the top was being partially delivered
1101 * completed, so we remove it. Note the pd_api flag was
1102 * taken off when the chunk was merged on in
1103 * sctp_queue_data_for_reasm below.
1105 nctl = TAILQ_NEXT(control, next_instrm);
1106 SCTPDBG(SCTP_DEBUG_XXX,
1107 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1108 control, control->end_added, control->sinfo_ssn,
1109 control->top_fsn, control->fsn_included,
1110 strm->last_sequence_delivered);
1111 if (control->end_added) {
1112 if (control->on_strm_q) {
1114 if (control->on_strm_q != SCTP_ON_ORDERED) {
1115 panic("Huh control: %p on_q: %d -- not ordered?",
1116 control, control->on_strm_q);
1119 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1120 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1121 control->on_strm_q = 0;
1123 if (strm->pd_api_started && control->pdapi_started) {
1124 control->pdapi_started = 0;
1125 strm->pd_api_started = 0;
1127 if (control->on_read_q == 0) {
1128 sctp_add_to_readq(stcb->sctp_ep, stcb,
1130 &stcb->sctp_socket->so_rcv, control->end_added,
1131 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1136 if (strm->pd_api_started) {
1138 * Can't add more must have gotten an un-ordered above being
1139 * partially delivered.
1144 next_to_del = strm->last_sequence_delivered + 1;
1146 SCTPDBG(SCTP_DEBUG_XXX,
1147 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1148 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1150 nctl = TAILQ_NEXT(control, next_instrm);
1151 if ((control->sinfo_ssn == next_to_del) &&
1152 (control->first_frag_seen)) {
1155 /* Ok we can deliver it onto the stream. */
1156 if (control->end_added) {
1157 /* We are done with it afterwards */
1158 if (control->on_strm_q) {
1160 if (control->on_strm_q != SCTP_ON_ORDERED) {
1161 panic("Huh control: %p on_q: %d -- not ordered?",
1162 control, control->on_strm_q);
1165 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1166 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1167 control->on_strm_q = 0;
1171 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1173 * A singleton now slipping through - mark
1174 * it non-revokable too
1176 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1177 } else if (control->end_added == 0) {
1179 * Check if we can defer adding until its
1182 if ((control->length < pd_point) || (strm->pd_api_started)) {
1184 * Don't need it or cannot add more
1185 * (one being delivered that way)
1190 done = (control->end_added) && (control->last_frag_seen);
1191 if (control->on_read_q == 0) {
1192 sctp_add_to_readq(stcb->sctp_ep, stcb,
1194 &stcb->sctp_socket->so_rcv, control->end_added,
1195 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1197 strm->last_sequence_delivered = next_to_del;
1202 /* We are now doing PD API */
1203 strm->pd_api_started = 1;
1204 control->pdapi_started = 1;
1214 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1215 struct sctp_stream_in *strm,
1216 struct sctp_tcb *stcb, struct sctp_association *asoc,
1217 struct sctp_tmit_chunk *chk, int hold_rlock)
1220 * Given a control and a chunk, merge the data from the chk onto the
1221 * control and free up the chunk resources.
1225 if (control->on_read_q && (hold_rlock == 0)) {
1227 * Its being pd-api'd so we must do some locks.
1229 SCTP_INP_READ_LOCK(stcb->sctp_ep);
1232 if (control->data == NULL) {
1233 control->data = chk->data;
1234 sctp_setup_tail_pointer(control);
1236 sctp_add_to_tail_pointer(control, chk->data);
1238 control->fsn_included = chk->rec.data.fsn_num;
1239 asoc->size_on_reasm_queue -= chk->send_size;
1240 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1241 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1243 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1244 control->first_frag_seen = 1;
1246 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1248 if ((control->on_strm_q) && (control->on_read_q)) {
1249 if (control->pdapi_started) {
1250 control->pdapi_started = 0;
1251 strm->pd_api_started = 0;
1253 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1255 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1256 control->on_strm_q = 0;
1257 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1259 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1260 control->on_strm_q = 0;
1262 } else if (control->on_strm_q) {
1263 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1264 control->on_strm_q);
1268 control->end_added = 1;
1269 control->last_frag_seen = 1;
1272 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1274 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * sctp_queue_data_for_reasm():
 * Place fragment `chk` into the reassembly state for `control`:
 *  - a FIRST fragment's data becomes the control's head mbuf chain;
 *  - any other fragment is inserted FSN-ordered onto control->reasm;
 * then pull any now-in-sequence fragments from the reasm list into the
 * control, maintain pd-api bookkeeping when the control is already on
 * the read queue, and wake the reader.  Several validations abort the
 * association via sctp_abort_in_reasm() on protocol violations:
 * duplicate SSN placement, missing B-bit at the cum-ack, duplicate
 * FIRST/LAST fragments, duplicate FSNs, or FSNs at/beyond an already
 * delivered or already-seen top FSN.
 */
1278 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1279 * queue, see if anthing can be delivered. If so pull it off (or as much as
1280 * we can. If we run out of space then we must dump what we can and set the
1281 * appropriate flag to say we queued what we could.
1284 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1285 struct sctp_stream_in *strm,
1286 struct sctp_queued_to_read *control,
1287 struct sctp_tmit_chunk *chk,
1288 int created_control,
1289 int *abort_flag, uint32_t tsn)
1292 struct sctp_tmit_chunk *at, *nat;
1293 int do_wakeup, unordered;
1296 * For old un-ordered data chunks.
/* Upper byte of sinfo_flags carries the wire chunk flags. */
1298 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1303 /* Must be added to the stream-in queue */
/* A freshly created control must first be linked into the stream. */
1304 if (created_control) {
1305 if (sctp_place_control_in_stream(strm, asoc, control)) {
1306 /* Duplicate SSN? */
1307 sctp_clean_up_control(stcb, control);
1308 sctp_abort_in_reasm(stcb, control, chk,
1310 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1313 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1315 * Ok we created this control and now lets validate
1316 * that its legal i.e. there is a B bit set, if not
1317 * and we have up to the cum-ack then its invalid.
1319 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1320 sctp_abort_in_reasm(stcb, control, chk,
1322 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old-style (pre I-DATA) unordered fragments take a special path. */
1327 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1328 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1332 * Ok we must queue the chunk into the reasembly portion: o if its
1333 * the first it goes to the control mbuf. o if its not first but the
1334 * next in sequence it goes to the control, and each succeeding one
1335 * in order also goes. o if its not in order we place it on the list
1338 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1339 /* Its the very first one. */
1340 SCTPDBG(SCTP_DEBUG_XXX,
1341 "chunk is a first fsn: %u becomes fsn_included\n",
1342 chk->rec.data.fsn_num);
/* Two FIRST fragments for the same message is a sender error. */
1343 if (control->first_frag_seen) {
1345 * Error on senders part, they either sent us two
1346 * data chunks with FIRST, or they sent two
1347 * un-ordered chunks that were fragmented at the
1348 * same time in the same stream.
1350 sctp_abort_in_reasm(stcb, control, chk,
1352 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* FIRST fragment: its data becomes the head of the control's chain. */
1355 control->first_frag_seen = 1;
1356 control->fsn_included = chk->rec.data.fsn_num;
1357 control->data = chk->data;
1358 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1360 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1361 sctp_setup_tail_pointer(control);
1363 /* Place the chunk in our list */
/* Not-FIRST: validate the FSN against what we have seen so far. */
1366 if (control->last_frag_seen == 0) {
1367 /* Still willing to raise highest FSN seen */
1368 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1369 SCTPDBG(SCTP_DEBUG_XXX,
1370 "We have a new top_fsn: %u\n",
1371 chk->rec.data.fsn_num);
1372 control->top_fsn = chk->rec.data.fsn_num;
1374 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1375 SCTPDBG(SCTP_DEBUG_XXX,
1376 "The last fsn is now in place fsn: %u\n",
1377 chk->rec.data.fsn_num);
1378 control->last_frag_seen = 1;
1380 if (asoc->idata_supported || control->first_frag_seen) {
1382 * For IDATA we always check since we know
1383 * that the first fragment is 0. For old
1384 * DATA we have to receive the first before
1385 * we know the first FSN (which is the TSN).
1387 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1389 * We have already delivered up to
1392 sctp_abort_in_reasm(stcb, control, chk,
1394 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/* LAST already seen: a second LAST fragment is a violation. */
1399 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1400 /* Second last? huh? */
1401 SCTPDBG(SCTP_DEBUG_XXX,
1402 "Duplicate last fsn: %u (top: %u) -- abort\n",
1403 chk->rec.data.fsn_num, control->top_fsn);
1404 sctp_abort_in_reasm(stcb, control,
1406 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1409 if (asoc->idata_supported || control->first_frag_seen) {
1411 * For IDATA we always check since we know
1412 * that the first fragment is 0. For old
1413 * DATA we have to receive the first before
1414 * we know the first FSN (which is the TSN).
1417 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1419 * We have already delivered up to
1422 SCTPDBG(SCTP_DEBUG_XXX,
1423 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1424 chk->rec.data.fsn_num, control->fsn_included);
1425 sctp_abort_in_reasm(stcb, control, chk,
1427 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1432 * validate not beyond top FSN if we have seen last
1435 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1436 SCTPDBG(SCTP_DEBUG_XXX,
1437 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1438 chk->rec.data.fsn_num,
1440 sctp_abort_in_reasm(stcb, control, chk,
1442 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1447 * If we reach here, we need to place the new chunk in the
1448 * reassembly for this control.
1450 SCTPDBG(SCTP_DEBUG_XXX,
1451 "chunk is a not first fsn: %u needs to be inserted\n",
1452 chk->rec.data.fsn_num);
/* Insert FSN-ordered into control->reasm; duplicates abort. */
1453 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1454 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1456 * This one in queue is bigger than the new
1457 * one, insert the new one before at.
1459 SCTPDBG(SCTP_DEBUG_XXX,
1460 "Insert it before fsn: %u\n",
1461 at->rec.data.fsn_num);
1462 asoc->size_on_reasm_queue += chk->send_size;
1463 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1464 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1467 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1469 * Gak, He sent me a duplicate str seq
1473 * foo bar, I guess I will just free this
1474 * new guy, should we abort too? FIX ME
1475 * MAYBE? Or it COULD be that the SSN's have
1476 * wrapped. Maybe I should compare to TSN
1477 * somehow... sigh for now just blow away
1480 SCTPDBG(SCTP_DEBUG_XXX,
1481 "Duplicate to fsn: %u -- abort\n",
1482 at->rec.data.fsn_num);
1483 sctp_abort_in_reasm(stcb, control,
1485 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1489 if (inserted == 0) {
1490 /* Goes on the end */
1491 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1492 chk->rec.data.fsn_num);
1493 asoc->size_on_reasm_queue += chk->send_size;
1494 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1495 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1499 * Ok lets see if we can suck any up into the control structure that
1500 * are in seq if it makes sense.
1504 * If the first fragment has not been seen there is no sense in
1507 if (control->first_frag_seen) {
/* Drain consecutive FSNs from the reasm list into the control. */
1508 next_fsn = control->fsn_included + 1;
1509 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1510 if (at->rec.data.fsn_num == next_fsn) {
1511 /* We can add this one now to the control */
1512 SCTPDBG(SCTP_DEBUG_XXX,
1513 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1515 at->rec.data.fsn_num,
1516 next_fsn, control->fsn_included);
1517 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1518 sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1519 if (control->on_read_q) {
/* Message completed while under pd-api: clear the pd-api state. */
1523 if (control->end_added && control->pdapi_started) {
1524 if (strm->pd_api_started) {
1525 strm->pd_api_started = 0;
1526 control->pdapi_started = 0;
1528 if (control->on_read_q == 0) {
1529 sctp_add_to_readq(stcb->sctp_ep, stcb,
1531 &stcb->sctp_socket->so_rcv, control->end_added,
1532 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1543 /* Need to wakeup the reader */
1544 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * sctp_find_reasm_entry():
 * Search stream `strm` for the in-progress reassembly entry whose
 * msg_id matches: ordered messages are looked up on the ordered
 * inqueue, unordered ones on uno_inqueue (for `old` DATA-format
 * traffic only the head of the unordered queue is considered).
 * Presumably returns the matching control or NULL -- the tail of the
 * function is not visible here; verify against the full source.
 */
1548 static struct sctp_queued_to_read *
1549 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1551 struct sctp_queued_to_read *control;
1554 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1555 if (control->msg_id == msg_id) {
1561 control = TAILQ_FIRST(&strm->uno_inqueue);
1564 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1565 if (control->msg_id == msg_id) {
/*
 * sctp_process_a_data_chunk():
 * Process one inbound DATA or I-DATA chunk.  Parses the header
 * (I-DATA carries a 32-bit msg_id and an FSN; old DATA derives msg_id
 * from the 16-bit SSN and fsn from the TSN), then validates: non-empty
 * payload, non-duplicate TSN, mapping-array range, valid stream id,
 * consistency with any existing reassembly entry, rwnd/queue limits,
 * and SSN sanity versus the last delivered sequence.  The payload is
 * then copied (or stolen when it is the last chunk of the packet), the
 * TSN is marked in the nr_mapping/mapping arrays, and the chunk is
 * either express-delivered straight to the socket buffer, queued on
 * the stream for re-ordering, or handed to the reassembly code.
 * Finally, deferred stream-reset work is serviced once the cum-ack
 * reaches the reset TSN, and reassembly delivery is re-checked.
 * Sets *abort_flag on fatal protocol violations.
 */
1574 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1575 struct mbuf **m, int offset, int chk_length,
1576 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1577 int *break_flag, int last_chunk, uint8_t chtype)
1579 /* Process a data chunk */
1580 /* struct sctp_tmit_chunk *chk; */
1581 struct sctp_data_chunk *ch;
1582 struct sctp_idata_chunk *nch, chunk_buf;
1583 struct sctp_tmit_chunk *chk;
1584 uint32_t tsn, fsn, gap, msg_id;
1587 int need_reasm_check = 0;
1589 struct mbuf *op_err;
1590 char msg[SCTP_DIAG_INFO_LEN];
1591 struct sctp_queued_to_read *control = NULL;
1592 uint32_t protocol_id;
1593 uint8_t chunk_flags;
1594 struct sctp_stream_reset_list *liste;
1595 struct sctp_stream_in *strm;
1598 int created_control = 0;
/* Pull the chunk header out of the mbuf chain; layout differs for I-DATA. */
1602 if (chtype == SCTP_IDATA) {
1603 nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1604 sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1605 ch = (struct sctp_data_chunk *)nch;
1606 clen = sizeof(struct sctp_idata_chunk);
1607 tsn = ntohl(ch->dp.tsn);
1608 msg_id = ntohl(nch->dp.msg_id);
1609 protocol_id = nch->dp.ppid_fsn.protocol_id;
1610 if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1613 fsn = ntohl(nch->dp.ppid_fsn.fsn);
1616 ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1617 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1618 tsn = ntohl(ch->dp.tsn);
1619 protocol_id = ch->dp.protocol_id;
1620 clen = sizeof(struct sctp_data_chunk);
/* Old DATA: msg_id is the 16-bit stream sequence number. */
1622 msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1626 chunk_flags = ch->ch.chunk_flags;
/* A data chunk with no payload is a protocol violation: abort. */
1627 if ((size_t)chk_length == clen) {
1629 * Need to send an abort since we had a empty data chunk.
1631 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1632 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1633 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1637 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1638 asoc->send_sack = 1;
1640 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1641 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1642 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1647 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* TSN at or below cum-ack: duplicate; record it for the next SACK. */
1648 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1649 /* It is a duplicate */
1650 SCTP_STAT_INCR(sctps_recvdupdata);
1651 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1652 /* Record a dup for the next outbound sack */
1653 asoc->dup_tsns[asoc->numduptsns] = tsn;
1656 asoc->send_sack = 1;
1659 /* Calculate the number of TSN's between the base and this TSN */
1660 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1661 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1662 /* Can't hold the bit in the mapping at max array, toss it */
1665 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1666 SCTP_TCB_LOCK_ASSERT(stcb);
1667 if (sctp_expand_mapping_array(asoc, gap)) {
1668 /* Can't expand, drop it */
1672 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1675 /* See if we have received this one already */
1676 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1677 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1678 SCTP_STAT_INCR(sctps_recvdupdata);
1679 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1680 /* Record a dup for the next outbound sack */
1681 asoc->dup_tsns[asoc->numduptsns] = tsn;
1684 asoc->send_sack = 1;
1688 * Check to see about the GONE flag, duplicates would cause a sack
1689 * to be sent up above
1691 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1692 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1693 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1695 * wait a minute, this guy is gone, there is no longer a
1696 * receiver. Send peer an ABORT!
1698 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1699 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1704 * Now before going further we see if there is room. If NOT then we
1705 * MAY let one through only IF this TSN is the one we are waiting
1706 * for on a partial delivery API.
1709 /* Is the stream valid? */
1710 strmno = ntohs(ch->dp.stream_id);
/* Invalid stream id: queue an INVALID_STREAM error, keep the TSN. */
1712 if (strmno >= asoc->streamincnt) {
1713 struct sctp_error_invalid_stream *cause;
1715 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1716 0, M_NOWAIT, 1, MT_DATA);
1717 if (op_err != NULL) {
1718 /* add some space up front so prepend will work well */
1719 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1720 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1722 * Error causes are just param's and this one has
1723 * two back to back phdr, one with the error type
1724 * and size, the other with the streamid and a rsvd
1726 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1727 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1728 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1729 cause->stream_id = ch->dp.stream_id;
1730 cause->reserved = htons(0);
1731 sctp_queue_op_err(stcb, op_err);
1733 SCTP_STAT_INCR(sctps_badsid);
1734 SCTP_TCB_LOCK_ASSERT(stcb);
1735 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1736 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1737 asoc->highest_tsn_inside_nr_map = tsn;
1739 if (tsn == (asoc->cumulative_tsn + 1)) {
1740 /* Update cum-ack */
1741 asoc->cumulative_tsn = tsn;
1745 strm = &asoc->strmin[strmno];
1747 * If its a fragmented message, lets see if we can find the control
1748 * on the reassembly queues.
1750 if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1752 * The first *must* be fsn 0, and other (middle/end) pieces
1753 * can *not* be fsn 0.
1757 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1758 /* See if we can find the re-assembly entity */
1759 control = sctp_find_reasm_entry(strm, msg_id, ordered, old_data);
1760 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1761 chunk_flags, control);
1763 /* We found something, does it belong? */
/* Existing entry must agree on SSN and ordered/unordered flavor. */
1764 if (ordered && (msg_id != control->sinfo_ssn)) {
1766 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1767 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1768 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1772 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1774 * We can't have a switched order with an
1779 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1781 * We can't have a switched unordered with a
1789 * Its a complete segment. Lets validate we don't have a
1790 * re-assembly going on with the same Stream/Seq (for
1791 * ordered) or in the same Stream for unordered.
1793 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1795 if (sctp_find_reasm_entry(strm, msg_id, ordered, old_data)) {
1796 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1803 /* now do the tests */
/* Resource limits: too many queued chunks or no receive window left. */
1804 if (((asoc->cnt_on_all_streams +
1805 asoc->cnt_on_reasm_queue +
1806 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1807 (((int)asoc->my_rwnd) <= 0)) {
1809 * When we have NO room in the rwnd we check to make sure
1810 * the reader is doing its job...
1812 if (stcb->sctp_socket->so_rcv.sb_cc) {
1813 /* some to read, wake-up */
1814 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1817 so = SCTP_INP_SO(stcb->sctp_ep);
1818 atomic_add_int(&stcb->asoc.refcnt, 1);
1819 SCTP_TCB_UNLOCK(stcb);
1820 SCTP_SOCKET_LOCK(so, 1);
1821 SCTP_TCB_LOCK(stcb);
1822 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1823 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1824 /* assoc was freed while we were unlocked */
1825 SCTP_SOCKET_UNLOCK(so, 1);
1829 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1830 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1831 SCTP_SOCKET_UNLOCK(so, 1);
1834 /* now is it in the mapping array of what we have accepted? */
1836 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1837 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1838 /* Nope not in the valid range dump it */
1840 sctp_set_rwnd(stcb, asoc);
1841 if ((asoc->cnt_on_all_streams +
1842 asoc->cnt_on_reasm_queue +
1843 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1844 SCTP_STAT_INCR(sctps_datadropchklmt);
1846 SCTP_STAT_INCR(sctps_datadroprwnd);
1852 if (control == NULL) {
1855 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1860 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Optional per-assoc TSN-in logging (ring buffer of recent chunks). */
1861 SCTP_TCB_LOCK_ASSERT(stcb);
1862 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1863 asoc->tsn_in_at = 0;
1864 asoc->tsn_in_wrapped = 1;
1866 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1867 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1868 asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1869 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1870 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1871 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1872 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1873 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1877 * Before we continue lets validate that we are not being fooled by
1878 * an evil attacker. We can only have Nk chunks based on our TSN
1879 * spread allowed by the mapping array N * 8 bits, so there is no
1880 * way our stream sequence numbers could have wrapped. We of course
1881 * only validate the FIRST fragment so the bit must be set.
1883 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1884 (TAILQ_EMPTY(&asoc->resetHead)) &&
1885 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1886 SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1887 /* The incoming sseq is behind where we last delivered? */
1888 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1889 msg_id, asoc->strmin[strmno].last_sequence_delivered);
1891 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1892 asoc->strmin[strmno].last_sequence_delivered,
1893 tsn, strmno, msg_id);
1894 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1895 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1896 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1900 /************************************
1901 * From here down we may find ch-> invalid
1902 * so its a good idea NOT to use it.
1903 *************************************/
1905 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1907 the_len = (chk_length - sizeof(struct sctp_data_chunk));
/* Copy the payload unless this is the packet's last chunk (then steal it). */
1909 if (last_chunk == 0) {
1911 dmbuf = SCTP_M_COPYM(*m,
1912 (offset + sizeof(struct sctp_idata_chunk)),
1915 dmbuf = SCTP_M_COPYM(*m,
1916 (offset + sizeof(struct sctp_data_chunk)),
1919 #ifdef SCTP_MBUF_LOGGING
1920 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1921 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1925 /* We can steal the last chunk */
1929 /* lop off the top part */
1931 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1933 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1935 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1936 l_len = SCTP_BUF_LEN(dmbuf);
1939 * need to count up the size hopefully does not hit
1945 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1946 l_len += SCTP_BUF_LEN(lat);
1949 if (l_len > the_len) {
1950 /* Trim the end round bytes off too */
1951 m_adj(dmbuf, -(l_len - the_len));
1954 if (dmbuf == NULL) {
1955 SCTP_STAT_INCR(sctps_nomem);
1959 * Now no matter what we need a control, get one if we don't have
1960 * one (we may have gotten it above when we found the message was
1963 if (control == NULL) {
1964 sctp_alloc_a_readq(stcb, control);
1965 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1970 if (control == NULL) {
1971 SCTP_STAT_INCR(sctps_nomem);
/* Unfragmented message: the whole payload completes it immediately. */
1974 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1975 control->data = dmbuf;
1976 control->tail_mbuf = NULL;
1977 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1978 control->top_fsn = control->fsn_included = fsn;
1980 created_control = 1;
1982 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1983 chunk_flags, ordered, msg_id, control);
/* Express delivery: complete, no reset pending, next-in-order, empty queue. */
1984 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1985 TAILQ_EMPTY(&asoc->resetHead) &&
1987 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1988 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1989 /* Candidate for express delivery */
1991 * Its not fragmented, No PD-API is up, Nothing in the
1992 * delivery queue, Its un-ordered OR ordered and the next to
1993 * deliver AND nothing else is stuck on the stream queue,
1994 * And there is room for it in the socket buffer. Lets just
1995 * stuff it up the buffer....
1997 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1998 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1999 asoc->highest_tsn_inside_nr_map = tsn;
2001 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
2004 sctp_add_to_readq(stcb->sctp_ep, stcb,
2005 control, &stcb->sctp_socket->so_rcv,
2006 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2008 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2009 /* for ordered, bump what we delivered */
2010 strm->last_sequence_delivered++;
2012 SCTP_STAT_INCR(sctps_recvexpress);
2013 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2014 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
2015 SCTP_STR_LOG_FROM_EXPRS_DEL);
2018 goto finish_express_del;
2020 /* Now will we need a chunk too? */
/* Fragmented: build a sctp_tmit_chunk descriptor for the reassembly code. */
2021 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2022 sctp_alloc_a_chunk(stcb, chk);
2024 /* No memory so we drop the chunk */
2025 SCTP_STAT_INCR(sctps_nomem);
2026 if (last_chunk == 0) {
2027 /* we copied it, free the copy */
2028 sctp_m_freem(dmbuf);
2032 chk->rec.data.TSN_seq = tsn;
2033 chk->no_fr_allowed = 0;
2034 chk->rec.data.fsn_num = fsn;
2035 chk->rec.data.stream_seq = msg_id;
2036 chk->rec.data.stream_number = strmno;
2037 chk->rec.data.payloadtype = protocol_id;
2038 chk->rec.data.context = stcb->asoc.context;
2039 chk->rec.data.doing_fast_retransmit = 0;
2040 chk->rec.data.rcv_flags = chunk_flags;
2042 chk->send_size = the_len;
2044 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n",
2047 atomic_add_int(&net->ref_count, 1);
2050 /* Set the appropriate TSN mark */
2051 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2052 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2053 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2054 asoc->highest_tsn_inside_nr_map = tsn;
2057 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2058 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2059 asoc->highest_tsn_inside_map = tsn;
2062 /* Now is it complete (i.e. not fragmented)? */
2063 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2065 * Special check for when streams are resetting. We could be
2066 * more smart about this and check the actual stream to see
2067 * if it is not being reset.. that way we would not create a
2068 * HOLB when amongst streams being reset and those not being
/* TSN past a pending stream reset: park on pending_reply_queue (TSN-sorted). */
2072 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2073 SCTP_TSN_GT(tsn, liste->tsn)) {
2075 * yep its past where we need to reset... go ahead
2078 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2080 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2082 struct sctp_queued_to_read *ctlOn, *nctlOn;
2083 unsigned char inserted = 0;
2085 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2086 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2091 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2096 if (inserted == 0) {
2098 * must be put at end, use prevP
2099 * (all setup from loop) to setup
2102 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2105 goto finish_express_del;
2107 if (chunk_flags & SCTP_DATA_UNORDERED) {
2108 /* queue directly into socket buffer */
2109 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2111 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2112 sctp_add_to_readq(stcb->sctp_ep, stcb,
2114 &stcb->sctp_socket->so_rcv, 1,
2115 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2118 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2120 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2128 goto finish_express_del;
2130 /* If we reach here its a reassembly */
2131 need_reasm_check = 1;
2132 SCTPDBG(SCTP_DEBUG_XXX,
2133 "Queue data to stream for reasm control: %p msg_id: %u\n",
2135 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2138 * the assoc is now gone and chk was put onto the reasm
2139 * queue, which has all been freed.
2147 /* Here we tidy up things */
2148 if (tsn == (asoc->cumulative_tsn + 1)) {
2149 /* Update cum-ack */
2150 asoc->cumulative_tsn = tsn;
2156 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2158 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2160 SCTP_STAT_INCR(sctps_recvdata);
2161 /* Set it present please */
2162 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2163 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2165 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2166 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2167 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2169 /* check the special flag for stream resets */
2170 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2171 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2173 * we have finished working through the backlogged TSN's now
2174 * time to reset streams. 1: call reset function. 2: free
2175 * pending_reply space 3: distribute any chunks in
2176 * pending_reply_queue.
2178 struct sctp_queued_to_read *ctl, *nctl;
2180 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2181 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2182 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2183 SCTP_FREE(liste, SCTP_M_STRESET);
2184 /* sa_ignore FREED_MEMORY */
2185 liste = TAILQ_FIRST(&asoc->resetHead);
2186 if (TAILQ_EMPTY(&asoc->resetHead)) {
2187 /* All can be removed */
2188 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2189 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2190 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2196 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2197 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2201 * if ctl->sinfo_tsn is <= liste->tsn we can
2202 * process it which is the NOT of
2203 * ctl->sinfo_tsn > liste->tsn
2205 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2206 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2213 * Now service re-assembly to pick up anything that has been
2214 * held on reassembly queue?
2216 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2217 need_reasm_check = 0;
2219 if (need_reasm_check) {
2220 /* Another one waits ? */
2221 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
/*
 * sctp_map_lookup_tab[v] = number of consecutive 1-bits in v counting
 * up from the least-significant bit (0..8; only index 0xff yields 8).
 * Used by the mapping-array slide logic to determine how far the
 * cum-ack can advance within a byte whose value is not 0xff.
 */
2226 static const int8_t sctp_map_lookup_tab[256] = {
2227 0, 1, 0, 2, 0, 1, 0, 3,
2228 0, 1, 0, 2, 0, 1, 0, 4,
2229 0, 1, 0, 2, 0, 1, 0, 3,
2230 0, 1, 0, 2, 0, 1, 0, 5,
2231 0, 1, 0, 2, 0, 1, 0, 3,
2232 0, 1, 0, 2, 0, 1, 0, 4,
2233 0, 1, 0, 2, 0, 1, 0, 3,
2234 0, 1, 0, 2, 0, 1, 0, 6,
2235 0, 1, 0, 2, 0, 1, 0, 3,
2236 0, 1, 0, 2, 0, 1, 0, 4,
2237 0, 1, 0, 2, 0, 1, 0, 3,
2238 0, 1, 0, 2, 0, 1, 0, 5,
2239 0, 1, 0, 2, 0, 1, 0, 3,
2240 0, 1, 0, 2, 0, 1, 0, 4,
2241 0, 1, 0, 2, 0, 1, 0, 3,
2242 0, 1, 0, 2, 0, 1, 0, 7,
2243 0, 1, 0, 2, 0, 1, 0, 3,
2244 0, 1, 0, 2, 0, 1, 0, 4,
2245 0, 1, 0, 2, 0, 1, 0, 3,
2246 0, 1, 0, 2, 0, 1, 0, 5,
2247 0, 1, 0, 2, 0, 1, 0, 3,
2248 0, 1, 0, 2, 0, 1, 0, 4,
2249 0, 1, 0, 2, 0, 1, 0, 3,
2250 0, 1, 0, 2, 0, 1, 0, 6,
2251 0, 1, 0, 2, 0, 1, 0, 3,
2252 0, 1, 0, 2, 0, 1, 0, 4,
2253 0, 1, 0, 2, 0, 1, 0, 3,
2254 0, 1, 0, 2, 0, 1, 0, 5,
2255 0, 1, 0, 2, 0, 1, 0, 3,
2256 0, 1, 0, 2, 0, 1, 0, 4,
2257 0, 1, 0, 2, 0, 1, 0, 3,
2258 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Advance (slide) the TSN mapping arrays after the cumulative-TSN point
 * has moved.  The OR of mapping_array and nr_mapping_array is scanned
 * byte-wise (via sctp_map_lookup_tab) to locate the first gap; then the
 * arrays are either fully cleared (everything up to the highest TSN is
 * acked) or the still-needed tail is copied down to the front and
 * mapping_array_base_tsn advanced.  No return value; operates entirely
 * on stcb->asoc state.  Caller must hold the TCB lock (convention in
 * this file - TODO confirm, lock assert is not visible in this chunk).
 */
2263 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2266 * Now we also need to check the mapping array in a couple of ways.
2267 * 1) Did we move the cum-ack point?
2269 * When you first glance at this you might think that all entries that
2270 * make up the position of the cum-ack would be in the nr-mapping
2271 * array only.. i.e. things up to the cum-ack are always
2272 * deliverable. Thats true with one exception, when its a fragmented
2273 * message we may not deliver the data until some threshold (or all
2274 * of it) is in place. So we must OR the nr_mapping_array and
2275 * mapping_array to get a true picture of the cum-ack.
2277 struct sctp_association *asoc;
2280 int slide_from, slide_end, lgap, distance;
2281 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for SCTP_MAP_PREPARE_SLIDE logging below. */
2285 old_cumack = asoc->cumulative_tsn;
2286 old_base = asoc->mapping_array_base_tsn;
2287 old_highest = asoc->highest_tsn_inside_map;
2289 * We could probably improve this a small bit by calculating the
2290 * offset of the current cum-ack as the starting point.
/*
 * Scan for the first byte that is not fully acked; 'at' accumulates the
 * number of contiguous received TSNs from the array base.
 */
2293 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2294 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2298 /* there is a 0 bit */
2299 at += sctp_map_lookup_tab[val];
2303 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: the computed cum-ack must not exceed both recorded
 * highest-TSN trackers.  Panics under INVARIANTS (elided here),
 * otherwise logs, dumps the map, and clamps the trackers.
 */
2305 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2306 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2308 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2309 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2311 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2312 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2313 sctp_print_mapping_array(asoc);
2314 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2315 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2317 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2318 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the renegable and non-renegable trackers. */
2321 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2322 highest_tsn = asoc->highest_tsn_inside_nr_map;
2324 highest_tsn = asoc->highest_tsn_inside_map;
2326 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2327 /* The complete array was completed by a single FR */
2328 /* highest becomes the cum-ack */
2336 /* clear the array */
/* clr = ceil(at / 8) bytes, capped to the array size. */
2337 clr = ((at + 7) >> 3);
2338 if (clr > asoc->mapping_array_size) {
2339 clr = asoc->mapping_array_size;
2341 memset(asoc->mapping_array, 0, clr);
2342 memset(asoc->nr_mapping_array, 0, clr);
/* Debug-only verification that both arrays really are all-zero now. */
2344 for (i = 0; i < asoc->mapping_array_size; i++) {
2345 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2346 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2347 sctp_print_mapping_array(asoc);
2351 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2352 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2353 } else if (at >= 8) {
2354 /* we can slide the mapping array down */
2355 /* slide_from holds where we hit the first NON 0xff byte */
2358 * now calculate the ceiling of the move using our highest
/* lgap = byte-distance (as a gap) from the array base to highest_tsn. */
2361 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2362 slide_end = (lgap >> 3);
2363 if (slide_end < slide_from) {
2364 sctp_print_mapping_array(asoc);
2366 panic("impossible slide");
2368 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2369 lgap, slide_end, slide_from, at);
2373 if (slide_end > asoc->mapping_array_size) {
2375 panic("would overrun buffer");
2377 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2378 asoc->mapping_array_size, slide_end);
2379 slide_end = asoc->mapping_array_size;
2382 distance = (slide_end - slide_from) + 1;
2383 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2384 sctp_log_map(old_base, old_cumack, old_highest,
2385 SCTP_MAP_PREPARE_SLIDE);
2386 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2387 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/*
 * If the live region would not fit after the copy, skip the slide and
 * hope a later call can move it once more data arrives.
 */
2389 if (distance + slide_from > asoc->mapping_array_size ||
2392 * Here we do NOT slide forward the array so that
2393 * hopefully when more data comes in to fill it up
2394 * we will be able to slide it forward. Really I
2395 * don't think this should happen :-0
2398 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2399 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2400 (uint32_t) asoc->mapping_array_size,
2401 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes to the front of both arrays... */
2406 for (ii = 0; ii < distance; ii++) {
2407 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2408 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ...and zero the vacated tail. */
2411 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2412 asoc->mapping_array[ii] = 0;
2413 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep a tracker that sat exactly at base-1 (i.e. "nothing in map")
 * consistent with the new base; slide_from bytes = slide_from*8 TSNs.
 */
2415 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2416 asoc->highest_tsn_inside_map += (slide_from << 3);
2418 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2419 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2421 asoc->mapping_array_base_tsn += (slide_from << 3);
2422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2423 sctp_log_map(asoc->mapping_array_base_tsn,
2424 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2425 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to emit a SACK immediately or (re)arm the delayed-ack
 * timer.  In SHUTDOWN-SENT state the SACK timer is stopped and a
 * SHUTDOWN plus an immediate SACK go out instead.  Otherwise a SACK is
 * sent right away when one is owed (send_sack), a gap closed or still
 * exists, duplicate TSNs were reported, delayed acks are disabled, or
 * the packet-count limit (sack_freq) was hit - except that the CMT DAC
 * algorithm may still choose to delay.  was_a_gap tells us whether a
 * gap existed before this packet was processed.
 */
2432 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2434 struct sctp_association *asoc;
2435 uint32_t highest_tsn;
/* highest_tsn = max of renegable and non-renegable map trackers. */
2438 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2439 highest_tsn = asoc->highest_tsn_inside_nr_map;
2441 highest_tsn = asoc->highest_tsn_inside_map;
2445 * Now we need to see if we need to queue a sack or just start the
2446 * timer (if allowed).
2448 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2450 * Ok special case, in SHUTDOWN-SENT case. here we maker
2451 * sure SACK timer is off and instead send a SHUTDOWN and a
2454 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2455 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2456 stcb->sctp_ep, stcb, NULL,
2457 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* Resend SHUTDOWN on the alternate path if one is set. */
2459 sctp_send_shutdown(stcb,
2460 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2461 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2465 /* is there a gap now ? */
2466 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2469 * CMT DAC algorithm: increase number of packets received
2472 stcb->asoc.cmt_dac_pkts_rcvd++;
2474 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2476 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2478 (stcb->asoc.numduptsns) || /* we have dup's */
2479 (is_a_gap) || /* is still a gap */
2480 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2481 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * CMT DAC override: when only gap/dup-free conditions forced the SACK,
 * delay it anyway by starting the recv (delayed-ack) timer.
 */
2484 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2485 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2486 (stcb->asoc.send_sack == 0) &&
2487 (stcb->asoc.numduptsns == 0) &&
2488 (stcb->asoc.delayed_ack) &&
2489 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2492 * CMT DAC algorithm: With CMT, delay acks
2493 * even in the face of
2495 * reordering. Therefore, if acks that do not
2496 * have to be sent because of the above
2497 * reasons, will be delayed. That is, acks
2498 * that would have been sent due to gap
2499 * reports will be delayed with DAC. Start
2500 * the delayed ack timer.
2502 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2503 stcb->sctp_ep, stcb, NULL);
2506 * Ok we must build a SACK since the timer
2507 * is pending, we got our first packet OR
2508 * there are gaps or duplicates.
2510 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2511 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Nothing forced a SACK: just make sure the delayed-ack timer runs. */
2514 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2515 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2516 stcb->sctp_ep, stcb, NULL);
/*
 * Walk every chunk in the DATA region of a received packet.
 *
 * Validates each chunk length against the remaining packet length,
 * aborts the association on a DATA/I-DATA chunk that contradicts the
 * negotiated idata_supported setting or on a too-short data chunk,
 * hands DATA and I-DATA chunks to sctp_process_a_data_chunk(), ignores
 * known control chunk types found after data, and applies the RFC
 * unknown-chunk bit rules (0x40: queue an error report; 0x80 clear:
 * stop processing the packet).  On the way out it reports rwnd-overrun
 * drops, updates liveness/auto-close bookkeeping, and calls
 * sctp_sack_check() to arm or send a SACK.
 *
 * *high_tsn is updated (by the per-chunk processing) with the highest
 * TSN seen; *offset advances over consumed chunks; *mm may be replaced
 * when the packet is copied out of a wastefully large cluster mbuf.
 */
2523 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2524 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2525 struct sctp_nets *net, uint32_t * high_tsn)
2527 struct sctp_chunkhdr *ch, chunk_buf;
2528 struct sctp_association *asoc;
2529 int num_chunks = 0; /* number of control chunks processed */
2531 int chk_length, break_flag, last_chunk;
2532 int abort_flag = 0, was_a_gap;
2534 uint32_t highest_tsn;
2537 sctp_set_rwnd(stcb, &stcb->asoc);
2540 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed BEFORE this packet, for sctp_sack_check(). */
2542 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2543 highest_tsn = asoc->highest_tsn_inside_nr_map;
2545 highest_tsn = asoc->highest_tsn_inside_map;
2547 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2549 * setup where we got the last DATA packet from for any SACK that
2550 * may need to go out. Don't bump the net. This is done ONLY when a
2551 * chunk is assigned.
2553 asoc->last_data_chunk_from = net;
2556 * Now before we proceed we must figure out if this is a wasted
2557 * cluster... i.e. it is a small packet sent in and yet the driver
2558 * underneath allocated a full cluster for it. If so we must copy it
2559 * to a smaller mbuf and free up the cluster mbuf. This will help
2560 * with cluster starvation. Note for __Panda__ we don't do this
2561 * since it has clusters all the way down to 64 bytes.
2563 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2564 /* we only handle mbufs that are singletons.. not chains */
2565 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2567 /* ok lets see if we can copy the data up */
2570 /* get the pointers and copy */
2571 to = mtod(m, caddr_t *);
2572 from = mtod((*mm), caddr_t *);
2573 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2574 /* copy the length and free up the old */
2575 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2577 /* success, back copy */
2580 /* We are in trouble in the mbuf world .. yikes */
2584 /* get pointer to the first chunk header */
2585 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2586 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2591 * process all DATA chunks...
2593 *high_tsn = asoc->cumulative_tsn;
2595 asoc->data_pkts_seen++;
2596 while (stop_proc == 0) {
2597 /* validate chunk length */
2598 chk_length = ntohs(ch->chunk_length);
2599 if (length - *offset < chk_length) {
2600 /* all done, mutulated chunk */
/* DATA received although I-DATA was negotiated: protocol violation, abort. */
2604 if ((asoc->idata_supported == 1) &&
2605 (ch->chunk_type == SCTP_DATA)) {
2606 struct mbuf *op_err;
2607 char msg[SCTP_DIAG_INFO_LEN];
2609 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2610 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2611 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2612 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* I-DATA received although plain DATA was negotiated: likewise abort. */
2615 if ((asoc->idata_supported == 0) &&
2616 (ch->chunk_type == SCTP_IDATA)) {
2617 struct mbuf *op_err;
2618 char msg[SCTP_DIAG_INFO_LEN];
2620 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2621 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2622 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2623 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2626 if ((ch->chunk_type == SCTP_DATA) ||
2627 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum header size differs for DATA vs. I-DATA chunks. */
2630 if (ch->chunk_type == SCTP_DATA) {
2631 clen = sizeof(struct sctp_data_chunk);
2633 clen = sizeof(struct sctp_idata_chunk);
2635 if (chk_length < clen) {
2637 * Need to send an abort since we had a
2638 * invalid data chunk.
2640 struct mbuf *op_err;
2641 char msg[SCTP_DIAG_INFO_LEN];
2643 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2645 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2646 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2647 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2650 #ifdef SCTP_AUDITING_ENABLED
2651 sctp_audit_log(0xB1, 0);
/* last_chunk: this data chunk's padded length ends the packet. */
2653 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2658 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2659 chk_length, net, high_tsn, &abort_flag, &break_flag,
2660 last_chunk, ch->chunk_type)) {
2668 * Set because of out of rwnd space and no
2669 * drop rep space left.
2675 /* not a data chunk in the data region */
2676 switch (ch->chunk_type) {
2677 case SCTP_INITIATION:
2678 case SCTP_INITIATION_ACK:
2679 case SCTP_SELECTIVE_ACK:
2680 case SCTP_NR_SELECTIVE_ACK:
2681 case SCTP_HEARTBEAT_REQUEST:
2682 case SCTP_HEARTBEAT_ACK:
2683 case SCTP_ABORT_ASSOCIATION:
2685 case SCTP_SHUTDOWN_ACK:
2686 case SCTP_OPERATION_ERROR:
2687 case SCTP_COOKIE_ECHO:
2688 case SCTP_COOKIE_ACK:
2691 case SCTP_SHUTDOWN_COMPLETE:
2692 case SCTP_AUTHENTICATION:
2693 case SCTP_ASCONF_ACK:
2694 case SCTP_PACKET_DROPPED:
2695 case SCTP_STREAM_RESET:
2696 case SCTP_FORWARD_CUM_TSN:
2700 * Now, what do we do with KNOWN
2701 * chunks that are NOT in the right
2704 * For now, I do nothing but ignore
2705 * them. We may later want to add
2706 * sysctl stuff to switch out and do
2707 * either an ABORT() or possibly
2710 struct mbuf *op_err;
2711 char msg[SCTP_DIAG_INFO_LEN];
2713 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2715 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2716 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2720 /* unknown chunk type, use bit rules */
/* Bit 0x40 set: report the unrecognized chunk in an ERROR chunk. */
2721 if (ch->chunk_type & 0x40) {
2722 /* Add a error report to the queue */
2723 struct mbuf *op_err;
2724 struct sctp_gen_error_cause *cause;
2726 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2727 0, M_NOWAIT, 1, MT_DATA);
2728 if (op_err != NULL) {
2729 cause = mtod(op_err, struct sctp_gen_error_cause *);
2730 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2731 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2732 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2733 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2734 if (SCTP_BUF_NEXT(op_err) != NULL) {
2735 sctp_queue_op_err(stcb, op_err);
2737 sctp_m_freem(op_err);
/* Bit 0x80 clear: stop processing the rest of this packet. */
2741 if ((ch->chunk_type & 0x80) == 0) {
2742 /* discard the rest of this packet */
2744 } /* else skip this bad chunk and
2747 } /* switch of chunk type */
2749 *offset += SCTP_SIZE32(chk_length);
2750 if ((*offset >= length) || stop_proc) {
2751 /* no more data left in the mbuf chain */
2755 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2756 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2765 * we need to report rwnd overrun drops.
2767 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2771 * Did we get data, if so update the time for auto-close and
2772 * give peer credit for being alive.
2774 SCTP_STAT_INCR(sctps_recvpktwithdata);
2775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2776 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2777 stcb->asoc.overall_error_count,
2779 SCTP_FROM_SCTP_INDATA,
2782 stcb->asoc.overall_error_count = 0;
2783 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2785 /* now service all of the reassm queue if needed */
2786 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2787 /* Assure that we ack right away */
2788 stcb->asoc.send_sack = 1;
2790 /* Start a sack timer or QUEUE a SACK for sending */
2791 sctp_sack_check(stcb, was_a_gap)<br/>;
/*
 * Process one SACK gap-ack fragment [frag_strt..frag_end] (both are
 * offsets relative to last_tsn, the SACK's cumulative TSN) against the
 * sent queue.  For each TSN newly covered: remove the chunk from
 * flight, credit net_ack, feed an RTT sample for first transmissions,
 * maintain the CMT (rtx_)pseudo-cumack and SFR/HTNA trackers, update
 * *biggest_newly_acked_tsn / *this_sack_lowest_newack, and mark the
 * chunk MARKED - or, when nr_sacking is set, NR_ACKED with its data
 * freed and stream accounting adjusted.  *p_tp1 carries the sent-queue
 * cursor across fragments; the queue may be re-scanned once ("circled")
 * for out-of-order fragments.  Returns nonzero if a chunk's data was
 * released so the caller should wake the sending socket.
 */
2796 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2797 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2799 uint32_t * biggest_newly_acked_tsn,
2800 uint32_t * this_sack_lowest_newack,
2803 struct sctp_tmit_chunk *tp1;
2804 unsigned int theTSN;
2805 int j, wake_him = 0, circled = 0;
2807 /* Recover the tp1 we last saw */
2810 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2812 for (j = frag_strt; j <= frag_end; j++) {
2813 theTSN = j + last_tsn;
2815 if (tp1->rec.data.doing_fast_retransmit)
2819 * CMT: CUCv2 algorithm. For each TSN being
2820 * processed from the sent queue, track the
2821 * next expected pseudo-cumack, or
2822 * rtx_pseudo_cumack, if required. Separate
2823 * cumack trackers for first transmissions,
2824 * and retransmissions.
2826 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2827 (tp1->whoTo->find_pseudo_cumack == 1) &&
2828 (tp1->snd_count == 1)) {
2829 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2830 tp1->whoTo->find_pseudo_cumack = 0;
2832 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2833 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2834 (tp1->snd_count > 1)) {
2835 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2836 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2838 if (tp1->rec.data.TSN_seq == theTSN) {
2839 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2841 * must be held until
/* First time this chunk is covered by any gap report. */
2844 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2846 * If it is less than RESEND, it is
2847 * now no-longer in flight.
2848 * Higher values may already be set
2849 * via previous Gap Ack Blocks...
2850 * i.e. ACKED or RESEND.
2852 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2853 *biggest_newly_acked_tsn)) {
2854 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2857 * CMT: SFR algo (and HTNA) - set
2858 * saw_newack to 1 for dest being
2859 * newly acked. update
2860 * this_sack_highest_newack if
2863 if (tp1->rec.data.chunk_was_revoked == 0)
2864 tp1->whoTo->saw_newack = 1;
2866 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2867 tp1->whoTo->this_sack_highest_newack)) {
2868 tp1->whoTo->this_sack_highest_newack =
2869 tp1->rec.data.TSN_seq;
2872 * CMT DAC algo: also update
2873 * this_sack_lowest_newack
2875 if (*this_sack_lowest_newack == 0) {
2876 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2877 sctp_log_sack(*this_sack_lowest_newack,
2879 tp1->rec.data.TSN_seq,
2882 SCTP_LOG_TSN_ACKED);
2884 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2887 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2888 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2889 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2890 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2891 * Separate pseudo_cumack trackers for first transmissions and
2894 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2895 if (tp1->rec.data.chunk_was_revoked == 0) {
2896 tp1->whoTo->new_pseudo_cumack = 1;
2898 tp1->whoTo->find_pseudo_cumack = 1;
2900 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2901 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2903 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2904 if (tp1->rec.data.chunk_was_revoked == 0) {
2905 tp1->whoTo->new_pseudo_cumack = 1;
2907 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2909 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2910 sctp_log_sack(*biggest_newly_acked_tsn,
2912 tp1->rec.data.TSN_seq,
2915 SCTP_LOG_TSN_ACKED);
2917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2918 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2919 tp1->whoTo->flight_size,
2921 (uint32_t) (uintptr_t) tp1->whoTo,
2922 tp1->rec.data.TSN_seq);
/* Chunk leaves flight: per-net and total flight accounting. */
2924 sctp_flight_size_decrease(tp1);
2925 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2926 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2929 sctp_total_flight_decrease(stcb, tp1);
2931 tp1->whoTo->net_ack += tp1->send_size;
2932 if (tp1->snd_count < 2) {
2934 * True non-retransmited chunk
2936 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample from a first-transmission chunk (surrounding
 * conditions elided here - rto_ok gating presumably applies). */
2944 sctp_calculate_rto(stcb,
2947 &tp1->sent_rcv_time,
2948 sctp_align_safe_nocopy,
2949 SCTP_RTT_FROM_DATA);
2952 if (tp1->whoTo->rto_needed == 0) {
2953 tp1->whoTo->rto_needed = 1;
2959 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2960 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2961 stcb->asoc.this_sack_highest_gap)) {
2962 stcb->asoc.this_sack_highest_gap =
2963 tp1->rec.data.TSN_seq;
2965 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2966 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2967 #ifdef SCTP_AUDITING_ENABLED
2968 sctp_audit_log(0xB2,
2969 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2974 * All chunks NOT UNSENT fall through here and are marked
2975 * (leave PR-SCTP ones that are to skip alone though)
2977 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2978 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2979 tp1->sent = SCTP_DATAGRAM_MARKED;
2981 if (tp1->rec.data.chunk_was_revoked) {
2982 /* deflate the cwnd */
2983 tp1->whoTo->cwnd -= tp1->book_size;
2984 tp1->rec.data.chunk_was_revoked = 0;
2986 /* NR Sack code here */
2988 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* Non-renegable ack: release the data and fix stream counts. */
2989 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2990 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2993 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2996 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2997 (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2998 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2999 stcb->asoc.trigger_reset = 1;
3001 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3007 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3008 sctp_m_freem(tp1->data);
3015 } /* if (tp1->TSN_seq == theTSN) */
/* Sent queue is TSN-ordered: once past theTSN, stop this inner scan. */
3016 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3019 tp1 = TAILQ_NEXT(tp1, sctp_next);
3020 if ((tp1 == NULL) && (circled == 0)) {
3022 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3024 } /* end while (tp1) */
3027 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3029 /* In case the fragments were not in order we must reset */
3030 } /* end for (j = fragStart */
3032 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over the num_seg gap-ack and num_nr_seg nr-gap-ack blocks of
 * a (NR-)SACK chunk in mbuf m at *offset.  Each block is read via
 * sctp_m_getptr(); malformed blocks (start > end) are skipped and
 * out-of-order blocks restart the sent-queue cursor from the head.
 * Per-block processing is delegated to sctp_process_segment_range().
 * Updates *biggest_tsn_acked and, through the callee, the
 * newly-acked trackers.  Returns nonzero ("chunk_freed") when any
 * chunk's data was released, so the caller can wake the sender.
 */
3037 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3038 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3039 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3040 int num_seg, int num_nr_seg, int *rto_ok)
3042 struct sctp_gap_ack_block *frag, block;
3043 struct sctp_tmit_chunk *tp1;
3048 uint16_t frag_strt, frag_end, prev_frag_end;
3050 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3054 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3057 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3059 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3060 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3061 *offset += sizeof(block);
/* Truncated SACK: stop and return what we have processed so far. */
3063 return (chunk_freed);
3065 frag_strt = ntohs(frag->start);
3066 frag_end = ntohs(frag->end);
3068 if (frag_strt > frag_end) {
3069 /* This gap report is malformed, skip it. */
3072 if (frag_strt <= prev_frag_end) {
3073 /* This gap report is not in order, so restart. */
3074 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3076 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3077 *biggest_tsn_acked = last_tsn + frag_end;
/* non_revocable distinguishes the trailing nr-gap-ack blocks. */
3084 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3085 non_revocable, &num_frs, biggest_newly_acked_tsn,
3086 this_sack_lowest_newack, rto_ok)) {
3089 prev_frag_end = frag_end;
3091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3093 sctp_log_fr(*biggest_tsn_acked,
3094 *biggest_newly_acked_tsn,
3095 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3097 return (chunk_freed);
/*
 * After SACK processing, detect "revoked" chunks: walk the sent queue
 * for TSNs above cumack (up to biggest_tsn_acked).  A chunk previously
 * ACKED by a gap report but NOT covered by this SACK has been revoked
 * by the peer - put it back in flight (state SENT), flag it
 * chunk_was_revoked, and inflate cwnd by book_size to compensate for
 * the artificial flight_size increase.  A chunk MARKED by this SACK is
 * promoted to ACKED.  The walk stops at the first UNSENT chunk.
 */
3101 sctp_check_for_revoked(struct sctp_tcb *stcb,
3102 struct sctp_association *asoc, uint32_t cumack,
3103 uint32_t biggest_tsn_acked)
3105 struct sctp_tmit_chunk *tp1;
3107 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3108 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3110 * ok this guy is either ACK or MARKED. If it is
3111 * ACKED it has been previously acked but not this
3112 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Beyond the highest TSN this SACK covered: nothing more to judge. */
3115 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3118 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3119 /* it has been revoked */
3120 tp1->sent = SCTP_DATAGRAM_SENT;
3121 tp1->rec.data.chunk_was_revoked = 1;
3123 * We must add this stuff back in to assure
3124 * timers and such get started.
3126 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3127 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3128 tp1->whoTo->flight_size,
3130 (uint32_t) (uintptr_t) tp1->whoTo,
3131 tp1->rec.data.TSN_seq);
3133 sctp_flight_size_increase(tp1);
3134 sctp_total_flight_increase(stcb, tp1);
3136 * We inflate the cwnd to compensate for our
3137 * artificial inflation of the flight_size.
3139 tp1->whoTo->cwnd += tp1->book_size;
3140 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3141 sctp_log_sack(asoc->last_acked_seq,
3143 tp1->rec.data.TSN_seq,
3146 SCTP_LOG_TSN_REVOKED);
3148 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3149 /* it has been re-acked in this SACK */
3150 tp1->sent = SCTP_DATAGRAM_ACKED;
3153 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3160 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3161 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3163 struct sctp_tmit_chunk *tp1;
3164 int strike_flag = 0;
3166 int tot_retrans = 0;
3167 uint32_t sending_seq;
3168 struct sctp_nets *net;
3169 int num_dests_sacked = 0;
3172 * select the sending_seq, this is either the next thing ready to be
3173 * sent but not transmitted, OR, the next seq we assign.
3175 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3177 sending_seq = asoc->sending_seq;
3179 sending_seq = tp1->rec.data.TSN_seq;
3182 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3183 if ((asoc->sctp_cmt_on_off > 0) &&
3184 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3185 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3186 if (net->saw_newack)
3190 if (stcb->asoc.prsctp_supported) {
3191 (void)SCTP_GETTIME_TIMEVAL(&now);
3193 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3195 if (tp1->no_fr_allowed) {
3196 /* this one had a timeout or something */
3199 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3200 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3201 sctp_log_fr(biggest_tsn_newly_acked,
3202 tp1->rec.data.TSN_seq,
3204 SCTP_FR_LOG_CHECK_STRIKE);
3206 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3207 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3211 if (stcb->asoc.prsctp_supported) {
3212 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3213 /* Is it expired? */
3214 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3215 /* Yes so drop it */
3216 if (tp1->data != NULL) {
3217 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3218 SCTP_SO_NOT_LOCKED);
3224 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3225 /* we are beyond the tsn in the sack */
3228 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3229 /* either a RESEND, ACKED, or MARKED */
3231 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3232 /* Continue strikin FWD-TSN chunks */
3233 tp1->rec.data.fwd_tsn_cnt++;
3238 * CMT : SFR algo (covers part of DAC and HTNA as well)
3240 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3242 * No new acks were receieved for data sent to this
3243 * dest. Therefore, according to the SFR algo for
3244 * CMT, no data sent to this dest can be marked for
3245 * FR using this SACK.
3248 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3249 tp1->whoTo->this_sack_highest_newack)) {
3251 * CMT: New acks were receieved for data sent to
3252 * this dest. But no new acks were seen for data
3253 * sent after tp1. Therefore, according to the SFR
3254 * algo for CMT, tp1 cannot be marked for FR using
3255 * this SACK. This step covers part of the DAC algo
3256 * and the HTNA algo as well.
3261 * Here we check to see if we were have already done a FR
3262 * and if so we see if the biggest TSN we saw in the sack is
3263 * smaller than the recovery point. If so we don't strike
3264 * the tsn... otherwise we CAN strike the TSN.
3267 * @@@ JRI: Check for CMT if (accum_moved &&
3268 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3271 if (accum_moved && asoc->fast_retran_loss_recovery) {
3273 * Strike the TSN if in fast-recovery and cum-ack
3276 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3277 sctp_log_fr(biggest_tsn_newly_acked,
3278 tp1->rec.data.TSN_seq,
3280 SCTP_FR_LOG_STRIKE_CHUNK);
3282 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3285 if ((asoc->sctp_cmt_on_off > 0) &&
3286 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3288 * CMT DAC algorithm: If SACK flag is set to
3289 * 0, then lowest_newack test will not pass
3290 * because it would have been set to the
3291 * cumack earlier. If not already to be
3292 * rtx'd, If not a mixed sack and if tp1 is
3293 * not between two sacked TSNs, then mark by
3294 * one more. NOTE that we are marking by one
3295 * additional time since the SACK DAC flag
3296 * indicates that two packets have been
3297 * received after this missing TSN.
3299 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3300 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3301 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3302 sctp_log_fr(16 + num_dests_sacked,
3303 tp1->rec.data.TSN_seq,
3305 SCTP_FR_LOG_STRIKE_CHUNK);
3310 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3311 (asoc->sctp_cmt_on_off == 0)) {
3313 * For those that have done a FR we must take
3314 * special consideration if we strike. I.e the
3315 * biggest_newly_acked must be higher than the
3316 * sending_seq at the time we did the FR.
3319 #ifdef SCTP_FR_TO_ALTERNATE
3321 * If FR's go to new networks, then we must only do
3322 * this for singly homed asoc's. However if the FR's
3323 * go to the same network (Armando's work) then its
3324 * ok to FR multiple times.
3332 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3333 tp1->rec.data.fast_retran_tsn)) {
3335 * Strike the TSN, since this ack is
3336 * beyond where things were when we
3339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3340 sctp_log_fr(biggest_tsn_newly_acked,
3341 tp1->rec.data.TSN_seq,
3343 SCTP_FR_LOG_STRIKE_CHUNK);
3345 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3349 if ((asoc->sctp_cmt_on_off > 0) &&
3350 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3352 * CMT DAC algorithm: If
3353 * SACK flag is set to 0,
3354 * then lowest_newack test
3355 * will not pass because it
3356 * would have been set to
3357 * the cumack earlier. If
3358 * not already to be rtx'd,
3359 * If not a mixed sack and
3360 * if tp1 is not between two
3361 * sacked TSNs, then mark by
3362 * one more. NOTE that we
3363 * are marking by one
3364 * additional time since the
3365 * SACK DAC flag indicates
3366 * that two packets have
3367 * been received after this
3370 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3371 (num_dests_sacked == 1) &&
3372 SCTP_TSN_GT(this_sack_lowest_newack,
3373 tp1->rec.data.TSN_seq)) {
3374 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3375 sctp_log_fr(32 + num_dests_sacked,
3376 tp1->rec.data.TSN_seq,
3378 SCTP_FR_LOG_STRIKE_CHUNK);
3380 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3388 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3391 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3392 biggest_tsn_newly_acked)) {
3394 * We don't strike these: This is the HTNA
3395 * algorithm i.e. we don't strike If our TSN is
3396 * larger than the Highest TSN Newly Acked.
3400 /* Strike the TSN */
3401 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3402 sctp_log_fr(biggest_tsn_newly_acked,
3403 tp1->rec.data.TSN_seq,
3405 SCTP_FR_LOG_STRIKE_CHUNK);
3407 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3410 if ((asoc->sctp_cmt_on_off > 0) &&
3411 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3413 * CMT DAC algorithm: If SACK flag is set to
3414 * 0, then lowest_newack test will not pass
3415 * because it would have been set to the
3416 * cumack earlier. If not already to be
3417 * rtx'd, If not a mixed sack and if tp1 is
3418 * not between two sacked TSNs, then mark by
3419 * one more. NOTE that we are marking by one
3420 * additional time since the SACK DAC flag
3421 * indicates that two packets have been
3422 * received after this missing TSN.
3424 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3425 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3426 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3427 sctp_log_fr(48 + num_dests_sacked,
3428 tp1->rec.data.TSN_seq,
3430 SCTP_FR_LOG_STRIKE_CHUNK);
3436 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3437 struct sctp_nets *alt;
3439 /* fix counts and things */
3440 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3441 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3442 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3444 (uint32_t) (uintptr_t) tp1->whoTo,
3445 tp1->rec.data.TSN_seq);
3448 tp1->whoTo->net_ack++;
3449 sctp_flight_size_decrease(tp1);
3450 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3451 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3456 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3457 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3459 /* add back to the rwnd */
3460 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3462 /* remove from the total flight */
3463 sctp_total_flight_decrease(stcb, tp1);
3465 if ((stcb->asoc.prsctp_supported) &&
3466 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3468 * Has it been retransmitted tv_sec times? -
3469 * we store the retran count there.
3471 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3472 /* Yes, so drop it */
3473 if (tp1->data != NULL) {
3474 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3475 SCTP_SO_NOT_LOCKED);
3477 /* Make sure to flag we had a FR */
3478 tp1->whoTo->net_ack++;
3483 * SCTP_PRINTF("OK, we are now ready to FR this
3486 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3487 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3491 /* This is a subsequent FR */
3492 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3494 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3495 if (asoc->sctp_cmt_on_off > 0) {
3497 * CMT: Using RTX_SSTHRESH policy for CMT.
3498 * If CMT is being used, then pick dest with
3499 * largest ssthresh for any retransmission.
3501 tp1->no_fr_allowed = 1;
3503 /* sa_ignore NO_NULL_CHK */
3504 if (asoc->sctp_cmt_pf > 0) {
3506 * JRS 5/18/07 - If CMT PF is on,
3507 * use the PF version of
3510 alt = sctp_find_alternate_net(stcb, alt, 2);
3513 * JRS 5/18/07 - If only CMT is on,
3514 * use the CMT version of
3517 /* sa_ignore NO_NULL_CHK */
3518 alt = sctp_find_alternate_net(stcb, alt, 1);
3524 * CUCv2: If a different dest is picked for
3525 * the retransmission, then new
3526 * (rtx-)pseudo_cumack needs to be tracked
3527 * for orig dest. Let CUCv2 track new (rtx-)
3528 * pseudo-cumack always.
3531 tp1->whoTo->find_pseudo_cumack = 1;
3532 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3534 } else {/* CMT is OFF */
3536 #ifdef SCTP_FR_TO_ALTERNATE
3537 /* Can we find an alternate? */
3538 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3541 * default behavior is to NOT retransmit
3542 * FR's to an alternate. Armando Caro's
3543 * paper details why.
3549 tp1->rec.data.doing_fast_retransmit = 1;
3551 /* mark the sending seq for possible subsequent FR's */
3553 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3554 * (uint32_t)tpi->rec.data.TSN_seq);
3556 if (TAILQ_EMPTY(&asoc->send_queue)) {
3558 * If the queue of send is empty then its
3559 * the next sequence number that will be
3560 * assigned so we subtract one from this to
3561 * get the one we last sent.
3563 tp1->rec.data.fast_retran_tsn = sending_seq;
3566 * If there are chunks on the send queue
3567 * (unsent data that has made it from the
3568 * stream queues but not out the door, we
3569 * take the first one (which will have the
3570 * lowest TSN) and subtract one to get the
3573 struct sctp_tmit_chunk *ttt;
3575 ttt = TAILQ_FIRST(&asoc->send_queue);
3576 tp1->rec.data.fast_retran_tsn =
3577 ttt->rec.data.TSN_seq;
3582 * this guy had a RTO calculation pending on
3585 if ((tp1->whoTo != NULL) &&
3586 (tp1->whoTo->rto_needed == 0)) {
3587 tp1->whoTo->rto_needed = 1;
3591 if (alt != tp1->whoTo) {
3592 /* yes, there is an alternate. */
3593 sctp_free_remote_addr(tp1->whoTo);
3594 /* sa_ignore FREED_MEMORY */
3596 atomic_add_int(&alt->ref_count, 1);
/*
 * sctp_try_advance_peer_ack_point() - walk the head of the sent_queue and
 * advance asoc->advanced_peer_ack_point over PR-SCTP chunks that may be
 * skipped (marked FORWARD_TSN_SKIP or NR_ACKED).  TTL-expired RESEND
 * chunks on PR streams are released along the way.  Returns a chunk
 * pointer (a_adv) apparently used by the caller to decide on sending a
 * FWD-TSN.  NOTE(review): several original lines (braces, continue/break
 * statements, and the return path) are elided in this excerpt, so the
 * exact control flow between the visible statements must be confirmed
 * against the full source.
 */
3602 struct sctp_tmit_chunk *
3603 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3604 struct sctp_association *asoc)
3606 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* PR-SCTP was never negotiated: nothing can ever be skipped. */
3610 if (asoc->prsctp_supported == 0) {
/* Safe iteration: sctp_release_pr_sctp_chunk() may unlink tp1 below. */
3613 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3614 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3615 tp1->sent != SCTP_DATAGRAM_RESEND &&
3616 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3617 /* no chance to advance, out of here */
3620 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3621 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3622 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3623 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3624 asoc->advanced_peer_ack_point,
3625 tp1->rec.data.TSN_seq, 0, 0);
3628 if (!PR_SCTP_ENABLED(tp1->flags)) {
3630 * We can't fwd-tsn past any that are reliable aka
3631 * retransmitted until the asoc fails.
/* Current time, compared against each chunk's time-to-drop below. */
3636 (void)SCTP_GETTIME_TIMEVAL(&now);
3640 * now we got a chunk which is marked for another
3641 * retransmission to a PR-stream but has run out its chances
3642 * already maybe OR has been marked to skip now. Can we skip
3643 * it if its a resend?
3645 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3646 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3648 * Now is this one marked for resend and its time is
3651 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3652 /* Yes so drop it */
3654 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3655 1, SCTP_SO_NOT_LOCKED);
3659 * No, we are done when hit one for resend
3660 * whos time as not expired.
3666 * Ok now if this chunk is marked to drop it we can clean up
3667 * the chunk, advance our peer ack point and we can check
3670 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3671 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3672 /* advance PeerAckPoint goes forward */
3673 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3674 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3676 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3677 /* No update but we do save the chk */
3682 * If it is still in RESEND we can advance no
/*
 * sctp_fs_audit() - consistency audit of the association's flight-size
 * accounting.  Walks the whole sent_queue, counting chunks by their
 * 'sent' state (in flight, marked for resend, in between, acked, above
 * acked) and compares against the totals captured at entry.  On a
 * mismatch it either panics or prints diagnostics; NOTE(review): the
 * #ifdef selecting panic vs. printf, the early-return for the all-PR
 * case, and the function's return value are on elided lines — confirm
 * against the full source.
 */
3692 sctp_fs_audit(struct sctp_association *asoc)
3694 struct sctp_tmit_chunk *chk;
3695 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3699 int entry_flight, entry_cnt;
/* Snapshot the counters being audited before walking the queue. */
3705 entry_flight = asoc->total_flight;
3706 entry_cnt = asoc->total_flight_count;
/* If everything queued is PR-SCTP, the audit is skipped (body elided). */
3708 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Tally each chunk into a bucket based on its 'sent' state. */
3711 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3712 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3713 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3714 chk->rec.data.TSN_seq,
3718 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3720 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3722 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in-flight or in-between means the
 * bookkeeping disagrees with the queue contents. */
3729 if ((inflight > 0) || (inbetween > 0)) {
3731 panic("Flight size-express incorrect? \n");
3733 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3734 entry_flight, entry_cnt);
3736 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3737 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery() - return a chunk that was sent as a
 * window probe to retransmittable state now that the peer's window has
 * reopened: clear its probe flag, remove it from the flight-size
 * accounting, and mark it SCTP_DATAGRAM_RESEND.  Chunks that are
 * already ACKed/skipped or have no data are logged and left alone
 * (the early return is on an elided line).
 */
3746 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3747 struct sctp_association *asoc,
3748 struct sctp_tmit_chunk *tp1)
3750 tp1->window_probe = 0;
3751 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3752 /* TSN's skipped we do NOT move back. */
3753 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3754 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3756 (uint32_t) (uintptr_t) tp1->whoTo,
3757 tp1->rec.data.TSN_seq);
3760 /* First setup this by shrinking flight */
3761 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3762 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3765 sctp_flight_size_decrease(tp1);
3766 sctp_total_flight_decrease(stcb, tp1);
3767 /* Now mark for resend */
3768 tp1->sent = SCTP_DATAGRAM_RESEND;
3769 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3771 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3772 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3773 tp1->whoTo->flight_size,
3775 (uint32_t) (uintptr_t) tp1->whoTo,
3776 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack() - streamlined SACK processing path.  Only
 * a cumulative ack and a_rwnd are supplied (no gap-ack segment
 * parameters), so this presumably handles SACKs carrying no gap
 * reports — TODO confirm against the caller.  It: validates the cumack
 * against the highest TSN sent (aborting the association on a protocol
 * violation), frees newly cum-acked chunks from the sent_queue while
 * updating flight-size/rwnd/RTO/CC bookkeeping, wakes the sending
 * socket, restarts or stops T3-send timers per destination, handles
 * window-probe recovery, runs the shutdown state machine when all data
 * is acked, and performs PR-SCTP FWD-TSN procedures.  NOTE(review):
 * many original lines (returns, else branches, *abort_now assignments)
 * are elided in this excerpt; confirm control flow against the full
 * source.
 */
3781 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3782 uint32_t rwnd, int *abort_now, int ecne_seen)
3784 struct sctp_nets *net;
3785 struct sctp_association *asoc;
3786 struct sctp_tmit_chunk *tp1, *tp2;
3788 int win_probe_recovery = 0;
3789 int win_probe_recovered = 0;
3790 int j, done_once = 0;
3794 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3795 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3796 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3798 SCTP_TCB_LOCK_ASSERT(stcb);
3799 #ifdef SCTP_ASOCLOG_OF_TSNS
3800 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3801 stcb->asoc.cumack_log_at++;
3802 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3803 stcb->asoc.cumack_log_at = 0;
3807 old_rwnd = asoc->peers_rwnd;
/* Old SACK (cumack behind what we already acked): handled on elided
 * lines.  A duplicate cumack only updates the peer's rwnd. */
3808 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3811 } else if (asoc->last_acked_seq == cumack) {
3812 /* Window update sack */
3813 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3814 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3815 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3816 /* SWS sender side engages */
3817 asoc->peers_rwnd = 0;
3819 if (asoc->peers_rwnd > old_rwnd) {
3824 /* First setup for CC stuff */
3825 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3826 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3827 /* Drag along the window_tsn for cwr's */
3828 net->cwr_window_tsn = cumack;
3830 net->prev_cwnd = net->cwnd;
3835 * CMT: Reset CUC and Fast recovery algo variables before
3838 net->new_pseudo_cumack = 0;
3839 net->will_exit_fast_recovery = 0;
3840 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3841 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity: the cumack must be below the next TSN we would send. */
3844 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3845 tp1 = TAILQ_LAST(&asoc->sent_queue,
3846 sctpchunk_listhead);
3847 send_s = tp1->rec.data.TSN_seq + 1;
3849 send_s = asoc->sending_seq;
3851 if (SCTP_TSN_GE(cumack, send_s)) {
3852 struct mbuf *op_err;
3853 char msg[SCTP_DIAG_INFO_LEN];
/* Peer acked data we never sent: protocol violation, abort. */
3857 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3859 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3860 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3861 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3864 asoc->this_sack_highest_gap = cumack;
3865 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3866 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3867 stcb->asoc.overall_error_count,
3869 SCTP_FROM_SCTP_INDATA,
3872 stcb->asoc.overall_error_count = 0;
3873 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3874 /* process the new consecutive TSN first */
3875 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3876 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3877 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3878 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3880 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3882 * If it is less than ACKED, it is
3883 * now no-longer in flight. Higher
3884 * values may occur during marking
3886 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3887 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3888 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3889 tp1->whoTo->flight_size,
3891 (uint32_t) (uintptr_t) tp1->whoTo,
3892 tp1->rec.data.TSN_seq);
3894 sctp_flight_size_decrease(tp1);
3895 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3896 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3899 /* sa_ignore NO_NULL_CHK */
3900 sctp_total_flight_decrease(stcb, tp1);
3902 tp1->whoTo->net_ack += tp1->send_size;
3903 if (tp1->snd_count < 2) {
3905 * True non-retransmited
3908 tp1->whoTo->net_ack2 +=
3911 /* update RTO too? */
3920 sctp_calculate_rto(stcb,
3922 &tp1->sent_rcv_time,
3923 sctp_align_safe_nocopy,
3924 SCTP_RTT_FROM_DATA);
3927 if (tp1->whoTo->rto_needed == 0) {
3928 tp1->whoTo->rto_needed = 1;
3934 * CMT: CUCv2 algorithm. From the
3935 * cumack'd TSNs, for each TSN being
3936 * acked for the first time, set the
3937 * following variables for the
3938 * corresp destination.
3939 * new_pseudo_cumack will trigger a
3941 * find_(rtx_)pseudo_cumack will
3942 * trigger search for the next
3943 * expected (rtx-)pseudo-cumack.
3945 tp1->whoTo->new_pseudo_cumack = 1;
3946 tp1->whoTo->find_pseudo_cumack = 1;
3947 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3950 /* sa_ignore NO_NULL_CHK */
3951 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3954 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3955 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3957 if (tp1->rec.data.chunk_was_revoked) {
3958 /* deflate the cwnd */
3959 tp1->whoTo->cwnd -= tp1->book_size;
3960 tp1->rec.data.chunk_was_revoked = 0;
/* Keep per-stream queue counts honest and, when a pending stream
 * reset is now unblocked, flag it for processing. */
3962 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3963 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3964 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3967 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3971 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3972 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3973 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3974 asoc->trigger_reset = 1;
/* Chunk fully cum-acked: unlink and free it. */
3976 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3978 /* sa_ignore NO_NULL_CHK */
3979 sctp_free_bufspace(stcb, asoc, tp1, 1);
3980 sctp_m_freem(tp1->data);
3983 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3984 sctp_log_sack(asoc->last_acked_seq,
3986 tp1->rec.data.TSN_seq,
3989 SCTP_LOG_FREE_SENT);
3991 asoc->sent_queue_cnt--;
3992 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Space was freed in so_snd: wake any writer blocked on the socket.
 * The Apple/SO_LOCK_TESTING path must drop the TCB lock to take the
 * socket lock, so it re-checks that the assoc survived the window. */
3999 /* sa_ignore NO_NULL_CHK */
4000 if (stcb->sctp_socket) {
4001 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4005 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4007 /* sa_ignore NO_NULL_CHK */
4008 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4010 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4011 so = SCTP_INP_SO(stcb->sctp_ep);
4012 atomic_add_int(&stcb->asoc.refcnt, 1);
4013 SCTP_TCB_UNLOCK(stcb);
4014 SCTP_SOCKET_LOCK(so, 1);
4015 SCTP_TCB_LOCK(stcb);
4016 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4017 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4018 /* assoc was freed while we were unlocked */
4019 SCTP_SOCKET_UNLOCK(so, 1);
4023 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4024 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4025 SCTP_SOCKET_UNLOCK(so, 1);
4028 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4029 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4033 /* JRS - Use the congestion control given in the CC module */
4034 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4035 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4036 if (net->net_ack2 > 0) {
4038 * Karn's rule applies to clearing error
4039 * count, this is optional.
4041 net->error_count = 0;
4042 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4043 /* addr came good */
4044 net->dest_state |= SCTP_ADDR_REACHABLE;
4045 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4046 0, (void *)net, SCTP_SO_NOT_LOCKED);
4048 if (net == stcb->asoc.primary_destination) {
4049 if (stcb->asoc.alternate) {
4051 * release the alternate,
4054 sctp_free_remote_addr(stcb->asoc.alternate);
4055 stcb->asoc.alternate = NULL;
/* Destination leaves "potentially failed": restart its heartbeat
 * timer and let the CC module undo any PF cwnd adjustment. */
4058 if (net->dest_state & SCTP_ADDR_PF) {
4059 net->dest_state &= ~SCTP_ADDR_PF;
4060 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4061 stcb->sctp_ep, stcb, net,
4062 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4063 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4064 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4065 /* Done with this net */
4068 /* restore any doubled timers */
4069 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4070 if (net->RTO < stcb->asoc.minrto) {
4071 net->RTO = stcb->asoc.minrto;
4073 if (net->RTO > stcb->asoc.maxrto) {
4074 net->RTO = stcb->asoc.maxrto;
4078 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4080 asoc->last_acked_seq = cumack;
4082 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4083 /* nothing left in-flight */
4084 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4085 net->flight_size = 0;
4086 net->partial_bytes_acked = 0;
4088 asoc->total_flight = 0;
4089 asoc->total_flight_count = 0;
/* Recompute the peer's receive window from a_rwnd minus what is
 * still outstanding (plus per-chunk overhead). */
4092 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4093 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4094 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4095 /* SWS sender side engages */
4096 asoc->peers_rwnd = 0;
4098 if (asoc->peers_rwnd > old_rwnd) {
4099 win_probe_recovery = 1;
4101 /* Now assure a timer where data is queued at */
4104 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4107 if (win_probe_recovery && (net->window_probe)) {
4108 win_probe_recovered = 1;
4110 * Find first chunk that was used with window probe
4111 * and clear the sent
4113 /* sa_ignore FREED_MEMORY */
4114 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4115 if (tp1->window_probe) {
4116 /* move back to data send queue */
4117 sctp_window_probe_recovery(stcb, asoc, tp1);
4122 if (net->RTO == 0) {
4123 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4125 to_ticks = MSEC_TO_TICKS(net->RTO);
4127 if (net->flight_size) {
4129 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4130 sctp_timeout_handler, &net->rxt_timer);
4131 if (net->window_probe) {
4132 net->window_probe = 0;
4135 if (net->window_probe) {
4137 * In window probes we must assure a timer
4138 * is still running there
4140 net->window_probe = 0;
4141 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4142 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4143 sctp_timeout_handler, &net->rxt_timer);
4145 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4146 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4148 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight-size audit: queue non-empty yet nothing counted in flight
 * (the enclosing condition's first lines are elided). */
4153 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4154 (asoc->sent_queue_retran_cnt == 0) &&
4155 (win_probe_recovered == 0) &&
4158 * huh, this should not happen unless all packets are
4159 * PR-SCTP and marked to skip of course.
4161 if (sctp_fs_audit(asoc)) {
4162 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4163 net->flight_size = 0;
4165 asoc->total_flight = 0;
4166 asoc->total_flight_count = 0;
4167 asoc->sent_queue_retran_cnt = 0;
4168 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4169 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4170 sctp_flight_size_increase(tp1);
4171 sctp_total_flight_increase(stcb, tp1);
4172 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4173 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4180 /**********************************/
4181 /* Now what about shutdown issues */
4182 /**********************************/
4183 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4184 /* nothing left on sendqueue.. consider done */
4186 if ((asoc->stream_queue_cnt == 1) &&
4187 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4188 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4189 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4190 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4192 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4193 (asoc->stream_queue_cnt == 0)) {
4194 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4195 /* Need to abort here */
4196 struct mbuf *op_err;
4201 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4202 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4203 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4206 struct sctp_nets *netp;
/* All data acked and nothing queued: send SHUTDOWN and move to
 * SHUTDOWN-SENT, arming the shutdown and guard timers. */
4208 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4209 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4210 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4212 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4213 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4214 sctp_stop_timers_for_shutdown(stcb);
4215 if (asoc->alternate) {
4216 netp = asoc->alternate;
4218 netp = asoc->primary_destination;
4220 sctp_send_shutdown(stcb, netp);
4221 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4222 stcb->sctp_ep, stcb, netp);
4223 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4224 stcb->sctp_ep, stcb, netp);
4226 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4227 (asoc->stream_queue_cnt == 0)) {
4228 struct sctp_nets *netp;
4230 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4233 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4234 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4235 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4236 sctp_stop_timers_for_shutdown(stcb);
4237 if (asoc->alternate) {
4238 netp = asoc->alternate;
4240 netp = asoc->primary_destination;
4242 sctp_send_shutdown_ack(stcb, netp);
4243 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4244 stcb->sctp_ep, stcb, netp);
4247 /*********************************************/
4248 /* Here we perform PR-SCTP procedures */
4250 /*********************************************/
4251 /* C1. update advancedPeerAckPoint */
4252 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4253 asoc->advanced_peer_ack_point = cumack;
4255 /* PR-Sctp issues need to be addressed too */
4256 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4257 struct sctp_tmit_chunk *lchk;
4258 uint32_t old_adv_peer_ack_point;
4260 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4261 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4262 /* C3. See if we need to send a Fwd-TSN */
4263 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4265 * ISSUE with ECN, see FWD-TSN processing.
4267 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4268 send_forward_tsn(stcb, asoc);
4270 /* try to FR fwd-tsn's that get lost too */
4271 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4272 send_forward_tsn(stcb, asoc);
4277 /* Assure a timer is up */
4278 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4279 stcb->sctp_ep, stcb, lchk->whoTo);
4282 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4283 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4285 stcb->asoc.peers_rwnd,
4286 stcb->asoc.total_flight,
4287 stcb->asoc.total_output_queue_size);
4292 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4293 struct sctp_tcb *stcb,
4294 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4295 int *abort_now, uint8_t flags,
4296 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4298 struct sctp_association *asoc;
4299 struct sctp_tmit_chunk *tp1, *tp2;
4300 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4301 uint16_t wake_him = 0;
4302 uint32_t send_s = 0;
4304 int accum_moved = 0;
4305 int will_exit_fast_recovery = 0;
4306 uint32_t a_rwnd, old_rwnd;
4307 int win_probe_recovery = 0;
4308 int win_probe_recovered = 0;
4309 struct sctp_nets *net = NULL;
4312 uint8_t reneged_all = 0;
4313 uint8_t cmt_dac_flag;
4316 * we take any chance we can to service our queues since we cannot
4317 * get awoken when the socket is read from :<
4320 * Now perform the actual SACK handling: 1) Verify that it is not an
4321 * old sack, if so discard. 2) If there is nothing left in the send
4322 * queue (cum-ack is equal to last acked) then you have a duplicate
4323 * too, update any rwnd change and verify no timers are running.
4324 * then return. 3) Process any new consequtive data i.e. cum-ack
4325 * moved process these first and note that it moved. 4) Process any
4326 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4327 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4328 * sync up flightsizes and things, stop all timers and also check
4329 * for shutdown_pending state. If so then go ahead and send off the
4330 * shutdown. If in shutdown recv, send off the shutdown-ack and
4331 * start that timer, Ret. 9) Strike any non-acked things and do FR
4332 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4333 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4334 * if in shutdown_recv state.
4336 SCTP_TCB_LOCK_ASSERT(stcb);
4338 this_sack_lowest_newack = 0;
4339 SCTP_STAT_INCR(sctps_slowpath_sack);
4341 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4342 #ifdef SCTP_ASOCLOG_OF_TSNS
4343 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4344 stcb->asoc.cumack_log_at++;
4345 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4346 stcb->asoc.cumack_log_at = 0;
4351 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4352 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4353 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4355 old_rwnd = stcb->asoc.peers_rwnd;
4356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4357 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4358 stcb->asoc.overall_error_count,
4360 SCTP_FROM_SCTP_INDATA,
4363 stcb->asoc.overall_error_count = 0;
4365 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4366 sctp_log_sack(asoc->last_acked_seq,
4373 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4375 uint32_t *dupdata, dblock;
4377 for (i = 0; i < num_dup; i++) {
4378 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4379 sizeof(uint32_t), (uint8_t *) & dblock);
4380 if (dupdata == NULL) {
4383 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4387 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4388 tp1 = TAILQ_LAST(&asoc->sent_queue,
4389 sctpchunk_listhead);
4390 send_s = tp1->rec.data.TSN_seq + 1;
4393 send_s = asoc->sending_seq;
4395 if (SCTP_TSN_GE(cum_ack, send_s)) {
4396 struct mbuf *op_err;
4397 char msg[SCTP_DIAG_INFO_LEN];
4400 * no way, we have not even sent this TSN out yet. Peer is
4401 * hopelessly messed up with us.
4403 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4406 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4407 tp1->rec.data.TSN_seq, (void *)tp1);
4412 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4414 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4415 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4416 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4419 /**********************/
4420 /* 1) check the range */
4421 /**********************/
4422 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4423 /* acking something behind */
4426 /* update the Rwnd of the peer */
4427 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4428 TAILQ_EMPTY(&asoc->send_queue) &&
4429 (asoc->stream_queue_cnt == 0)) {
4430 /* nothing left on send/sent and strmq */
4431 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4432 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4433 asoc->peers_rwnd, 0, 0, a_rwnd);
4435 asoc->peers_rwnd = a_rwnd;
4436 if (asoc->sent_queue_retran_cnt) {
4437 asoc->sent_queue_retran_cnt = 0;
4439 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4440 /* SWS sender side engages */
4441 asoc->peers_rwnd = 0;
4443 /* stop any timers */
4444 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4445 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4446 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4447 net->partial_bytes_acked = 0;
4448 net->flight_size = 0;
4450 asoc->total_flight = 0;
4451 asoc->total_flight_count = 0;
4455 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4456 * things. The total byte count acked is tracked in netAckSz AND
4457 * netAck2 is used to track the total bytes acked that are un-
4458 * amibguious and were never retransmitted. We track these on a per
4459 * destination address basis.
4461 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4462 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4463 /* Drag along the window_tsn for cwr's */
4464 net->cwr_window_tsn = cum_ack;
4466 net->prev_cwnd = net->cwnd;
4471 * CMT: Reset CUC and Fast recovery algo variables before
4474 net->new_pseudo_cumack = 0;
4475 net->will_exit_fast_recovery = 0;
4476 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4477 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4480 /* process the new consecutive TSN first */
4481 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4482 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4483 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4485 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4487 * If it is less than ACKED, it is
4488 * now no-longer in flight. Higher
4489 * values may occur during marking
4491 if ((tp1->whoTo->dest_state &
4492 SCTP_ADDR_UNCONFIRMED) &&
4493 (tp1->snd_count < 2)) {
4495 * If there was no retran
4496 * and the address is
4497 * un-confirmed and we sent
4499 * sacked.. its confirmed,
4502 tp1->whoTo->dest_state &=
4503 ~SCTP_ADDR_UNCONFIRMED;
4505 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4506 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4507 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4508 tp1->whoTo->flight_size,
4510 (uint32_t) (uintptr_t) tp1->whoTo,
4511 tp1->rec.data.TSN_seq);
4513 sctp_flight_size_decrease(tp1);
4514 sctp_total_flight_decrease(stcb, tp1);
4515 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4516 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4520 tp1->whoTo->net_ack += tp1->send_size;
4522 /* CMT SFR and DAC algos */
4523 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4524 tp1->whoTo->saw_newack = 1;
4526 if (tp1->snd_count < 2) {
4528 * True non-retransmited
4531 tp1->whoTo->net_ack2 +=
4534 /* update RTO too? */
4538 sctp_calculate_rto(stcb,
4540 &tp1->sent_rcv_time,
4541 sctp_align_safe_nocopy,
4542 SCTP_RTT_FROM_DATA);
4545 if (tp1->whoTo->rto_needed == 0) {
4546 tp1->whoTo->rto_needed = 1;
4552 * CMT: CUCv2 algorithm. From the
4553 * cumack'd TSNs, for each TSN being
4554 * acked for the first time, set the
4555 * following variables for the
4556 * corresp destination.
4557 * new_pseudo_cumack will trigger a
4559 * find_(rtx_)pseudo_cumack will
4560 * trigger search for the next
4561 * expected (rtx-)pseudo-cumack.
4563 tp1->whoTo->new_pseudo_cumack = 1;
4564 tp1->whoTo->find_pseudo_cumack = 1;
4565 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4568 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4569 sctp_log_sack(asoc->last_acked_seq,
4571 tp1->rec.data.TSN_seq,
4574 SCTP_LOG_TSN_ACKED);
4576 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4577 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4580 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4581 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4582 #ifdef SCTP_AUDITING_ENABLED
4583 sctp_audit_log(0xB3,
4584 (asoc->sent_queue_retran_cnt & 0x000000ff));
4587 if (tp1->rec.data.chunk_was_revoked) {
4588 /* deflate the cwnd */
4589 tp1->whoTo->cwnd -= tp1->book_size;
4590 tp1->rec.data.chunk_was_revoked = 0;
4592 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4593 tp1->sent = SCTP_DATAGRAM_ACKED;
4600 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4601 /* always set this up to cum-ack */
4602 asoc->this_sack_highest_gap = last_tsn;
4604 if ((num_seg > 0) || (num_nr_seg > 0)) {
4607 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4608 * to be greater than the cumack. Also reset saw_newack to 0
4611 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 net->saw_newack = 0;
4613 net->this_sack_highest_newack = last_tsn;
4617 * thisSackHighestGap will increase while handling NEW
4618 * segments this_sack_highest_newack will increase while
4619 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4620 * used for CMT DAC algo. saw_newack will also change.
4622 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4623 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4624 num_seg, num_nr_seg, &rto_ok)) {
4628 * validate the biggest_tsn_acked in the gap acks if strict
4629 * adherence is wanted.
4631 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4633 * peer is either confused or we are under attack.
4636 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4637 biggest_tsn_acked, send_s);
4641 /*******************************************/
4642 /* cancel ALL T3-send timer if accum moved */
4643 /*******************************************/
4644 if (asoc->sctp_cmt_on_off > 0) {
4645 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4646 if (net->new_pseudo_cumack)
4647 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4649 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4654 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4655 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4656 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4660 /********************************************/
4661 /* drop the acked chunks from the sentqueue */
4662 /********************************************/
4663 asoc->last_acked_seq = cum_ack;
4665 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4666 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4669 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4670 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4671 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4674 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4678 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4679 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4680 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4681 asoc->trigger_reset = 1;
4683 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4684 if (PR_SCTP_ENABLED(tp1->flags)) {
4685 if (asoc->pr_sctp_cnt != 0)
4686 asoc->pr_sctp_cnt--;
4688 asoc->sent_queue_cnt--;
4690 /* sa_ignore NO_NULL_CHK */
4691 sctp_free_bufspace(stcb, asoc, tp1, 1);
4692 sctp_m_freem(tp1->data);
4694 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4695 asoc->sent_queue_cnt_removeable--;
4698 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4699 sctp_log_sack(asoc->last_acked_seq,
4701 tp1->rec.data.TSN_seq,
4704 SCTP_LOG_FREE_SENT);
4706 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4709 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4711 panic("Warning flight size is positive and should be 0");
4713 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4714 asoc->total_flight);
4716 asoc->total_flight = 0;
4718 /* sa_ignore NO_NULL_CHK */
4719 if ((wake_him) && (stcb->sctp_socket)) {
4720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4724 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4725 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4726 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4728 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4729 so = SCTP_INP_SO(stcb->sctp_ep);
4730 atomic_add_int(&stcb->asoc.refcnt, 1);
4731 SCTP_TCB_UNLOCK(stcb);
4732 SCTP_SOCKET_LOCK(so, 1);
4733 SCTP_TCB_LOCK(stcb);
4734 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4735 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4736 /* assoc was freed while we were unlocked */
4737 SCTP_SOCKET_UNLOCK(so, 1);
4741 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4742 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4743 SCTP_SOCKET_UNLOCK(so, 1);
4746 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4747 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4751 if (asoc->fast_retran_loss_recovery && accum_moved) {
4752 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4753 /* Setup so we will exit RFC2582 fast recovery */
4754 will_exit_fast_recovery = 1;
4758 * Check for revoked fragments:
4760 * if Previous sack - Had no frags then we can't have any revoked if
4761 * Previous sack - Had frag's then - If we now have frags aka
4762 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4763 * some of them. else - The peer revoked all ACKED fragments, since
4764 * we had some before and now we have NONE.
4768 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4769 asoc->saw_sack_with_frags = 1;
4770 } else if (asoc->saw_sack_with_frags) {
4771 int cnt_revoked = 0;
4773 /* Peer revoked all dg's marked or acked */
4774 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4775 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4776 tp1->sent = SCTP_DATAGRAM_SENT;
4777 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4778 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4779 tp1->whoTo->flight_size,
4781 (uint32_t) (uintptr_t) tp1->whoTo,
4782 tp1->rec.data.TSN_seq);
4784 sctp_flight_size_increase(tp1);
4785 sctp_total_flight_increase(stcb, tp1);
4786 tp1->rec.data.chunk_was_revoked = 1;
4788 * To ensure that this increase in
4789 * flightsize, which is artificial, does not
4790 * throttle the sender, we also increase the
4791 * cwnd artificially.
4793 tp1->whoTo->cwnd += tp1->book_size;
4800 asoc->saw_sack_with_frags = 0;
4803 asoc->saw_sack_with_nr_frags = 1;
4805 asoc->saw_sack_with_nr_frags = 0;
4807 /* JRS - Use the congestion control given in the CC module */
4808 if (ecne_seen == 0) {
4809 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4810 if (net->net_ack2 > 0) {
4812 * Karn's rule applies to clearing error
4813 * count, this is optional.
4815 net->error_count = 0;
4816 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4817 /* addr came good */
4818 net->dest_state |= SCTP_ADDR_REACHABLE;
4819 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4820 0, (void *)net, SCTP_SO_NOT_LOCKED);
4822 if (net == stcb->asoc.primary_destination) {
4823 if (stcb->asoc.alternate) {
4825 * release the alternate,
4828 sctp_free_remote_addr(stcb->asoc.alternate);
4829 stcb->asoc.alternate = NULL;
4832 if (net->dest_state & SCTP_ADDR_PF) {
4833 net->dest_state &= ~SCTP_ADDR_PF;
4834 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4835 stcb->sctp_ep, stcb, net,
4836 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4837 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4838 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4839 /* Done with this net */
4842 /* restore any doubled timers */
4843 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4844 if (net->RTO < stcb->asoc.minrto) {
4845 net->RTO = stcb->asoc.minrto;
4847 if (net->RTO > stcb->asoc.maxrto) {
4848 net->RTO = stcb->asoc.maxrto;
4852 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4854 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4855 /* nothing left in-flight */
4856 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4857 /* stop all timers */
4858 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4860 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4861 net->flight_size = 0;
4862 net->partial_bytes_acked = 0;
4864 asoc->total_flight = 0;
4865 asoc->total_flight_count = 0;
4867 /**********************************/
4868 /* Now what about shutdown issues */
4869 /**********************************/
4870 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4871 /* nothing left on sendqueue.. consider done */
4872 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4873 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4874 asoc->peers_rwnd, 0, 0, a_rwnd);
4876 asoc->peers_rwnd = a_rwnd;
4877 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4878 /* SWS sender side engages */
4879 asoc->peers_rwnd = 0;
4882 if ((asoc->stream_queue_cnt == 1) &&
4883 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4884 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4885 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4886 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4888 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4889 (asoc->stream_queue_cnt == 0)) {
4890 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4891 /* Need to abort here */
4892 struct mbuf *op_err;
4897 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4898 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4899 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4902 struct sctp_nets *netp;
4904 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4905 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4906 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4908 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4909 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4910 sctp_stop_timers_for_shutdown(stcb);
4911 if (asoc->alternate) {
4912 netp = asoc->alternate;
4914 netp = asoc->primary_destination;
4916 sctp_send_shutdown(stcb, netp);
4917 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4918 stcb->sctp_ep, stcb, netp);
4919 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4920 stcb->sctp_ep, stcb, netp);
4923 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4924 (asoc->stream_queue_cnt == 0)) {
4925 struct sctp_nets *netp;
4927 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4930 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4931 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4932 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4933 sctp_stop_timers_for_shutdown(stcb);
4934 if (asoc->alternate) {
4935 netp = asoc->alternate;
4937 netp = asoc->primary_destination;
4939 sctp_send_shutdown_ack(stcb, netp);
4940 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4941 stcb->sctp_ep, stcb, netp);
4946 * Now here we are going to recycle net_ack for a different use...
4949 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4954 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4955 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4956 * automatically ensure that.
4958 if ((asoc->sctp_cmt_on_off > 0) &&
4959 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4960 (cmt_dac_flag == 0)) {
4961 this_sack_lowest_newack = cum_ack;
4963 if ((num_seg > 0) || (num_nr_seg > 0)) {
4964 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4965 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4967 /* JRS - Use the congestion control given in the CC module */
4968 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4970 /* Now are we exiting loss recovery ? */
4971 if (will_exit_fast_recovery) {
4972 /* Ok, we must exit fast recovery */
4973 asoc->fast_retran_loss_recovery = 0;
4975 if ((asoc->sat_t3_loss_recovery) &&
4976 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4977 /* end satellite t3 loss recovery */
4978 asoc->sat_t3_loss_recovery = 0;
4983 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4984 if (net->will_exit_fast_recovery) {
4985 /* Ok, we must exit fast recovery */
4986 net->fast_retran_loss_recovery = 0;
4990 /* Adjust and set the new rwnd value */
4991 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4992 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4993 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4995 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4996 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4997 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4998 /* SWS sender side engages */
4999 asoc->peers_rwnd = 0;
5001 if (asoc->peers_rwnd > old_rwnd) {
5002 win_probe_recovery = 1;
5005 * Now we must setup so we have a timer up for anyone with
5011 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5012 if (win_probe_recovery && (net->window_probe)) {
5013 win_probe_recovered = 1;
5015 * Find first chunk that was used with
5016 * window probe and clear the event. Put
5017 * it back into the send queue as if has
5020 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5021 if (tp1->window_probe) {
5022 sctp_window_probe_recovery(stcb, asoc, tp1);
5027 if (net->flight_size) {
5029 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5030 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5031 stcb->sctp_ep, stcb, net);
5033 if (net->window_probe) {
5034 net->window_probe = 0;
5037 if (net->window_probe) {
5039 * In window probes we must assure a timer
5040 * is still running there
5042 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5043 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5044 stcb->sctp_ep, stcb, net);
5047 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5048 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5050 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5055 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5056 (asoc->sent_queue_retran_cnt == 0) &&
5057 (win_probe_recovered == 0) &&
5060 * huh, this should not happen unless all packets are
5061 * PR-SCTP and marked to skip of course.
5063 if (sctp_fs_audit(asoc)) {
5064 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5065 net->flight_size = 0;
5067 asoc->total_flight = 0;
5068 asoc->total_flight_count = 0;
5069 asoc->sent_queue_retran_cnt = 0;
5070 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5071 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5072 sctp_flight_size_increase(tp1);
5073 sctp_total_flight_increase(stcb, tp1);
5074 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5075 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5082 /*********************************************/
5083 /* Here we perform PR-SCTP procedures */
5085 /*********************************************/
5086 /* C1. update advancedPeerAckPoint */
5087 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5088 asoc->advanced_peer_ack_point = cum_ack;
5090 /* C2. try to further move advancedPeerAckPoint ahead */
5091 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5092 struct sctp_tmit_chunk *lchk;
5093 uint32_t old_adv_peer_ack_point;
5095 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5096 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5097 /* C3. See if we need to send a Fwd-TSN */
5098 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5100 * ISSUE with ECN, see FWD-TSN processing.
5102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5103 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5104 0xee, cum_ack, asoc->advanced_peer_ack_point,
5105 old_adv_peer_ack_point);
5107 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5108 send_forward_tsn(stcb, asoc);
5110 /* try to FR fwd-tsn's that get lost too */
5111 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5112 send_forward_tsn(stcb, asoc);
5117 /* Assure a timer is up */
5118 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5119 stcb->sctp_ep, stcb, lchk->whoTo);
5122 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5123 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5125 stcb->asoc.peers_rwnd,
5126 stcb->asoc.total_flight,
5127 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a SHUTDOWN chunk.
 *
 * A SHUTDOWN carries no advertised receiver window, so we synthesize an
 * a_rwnd value that leaves our cached view of the peer's rwnd unchanged
 * (cached rwnd + bytes still in flight) and hand everything to the
 * express SACK handler with zero gap-ack blocks.
 *
 * NOTE(review): this view of the file is elided; brace lines are missing
 * from SOURCE and the visible code lines are kept exactly as-is.
 */
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
	/* cum_ack: peer's cumulative TSN ack, converted to host order. */
	uint32_t cum_ack, a_rwnd;
	cum_ack = ntohl(cp->cumulative_tsn_ack);
	/* Arrange so a_rwnd does NOT change */
	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
	/* Now call the express sack handling */
	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * After a FORWARD-TSN has advanced a PR-SCTP stream's delivery point,
 * push deliverable messages from this stream's reorder queue to the
 * socket's read queue.  Two passes over the in-queue:
 *   1) deliver everything at or before the stream's (already advanced)
 *      last_sequence_delivered;
 *   2) deliver messages that have now become exactly in-order.
 * Fragmented messages are handed to sctp_deliver_reasm_check() instead
 * of being delivered directly.
 *
 * Called with the INP read lock held (SCTP_READ_LOCK_HELD is passed to
 * the readq/reassembly helpers).
 *
 * NOTE(review): this view of the file is elided; closing braces, else
 * arms and some comment delimiters are missing from SOURCE.  All code
 * lines below are kept exactly as visible.
 */
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
	struct sctp_queued_to_read *ctl, *nctl;
	struct sctp_association *asoc;
	/* "old" selects 16-bit SSN vs 32-bit MID comparison semantics for
	 * the SCTP_MSGID_* macros (set from asoc->idata_supported). */
	int need_reasm_check = 0, old;
	/* tt: the sequence number up to which we may deliver. */
	tt = strmin->last_sequence_delivered;
	if (asoc->idata_supported) {
	/*
	 * First deliver anything prior to and including the stream no that
	 */
	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
		if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
			/* this is deliverable now */
			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* Unfragmented: unlink from whichever stream queue holds it. */
				if (ctl->on_strm_q) {
					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
						/* Unknown queue tag is a logic error. */
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, ctl, ctl->on_strm_q);
				/* subtract pending on streams */
				asoc->size_on_all_streams -= ctl->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    &stcb->sctp_socket->so_rcv,
					    1, SCTP_READ_LOCK_HELD,
					    SCTP_SO_NOT_LOCKED);
				/* Its a fragmented message */
				if (ctl->first_frag_seen) {
					/*
					 * Make it so this is next to
					 * deliver, we restore later
					 */
					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
					need_reasm_check = 1;
			/* no more delivery now. */
	if (need_reasm_check) {
		/* Run the reassembly delivery check for the partial message
		 * we positioned as next-to-deliver above. */
		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
		if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
			/* Restore the next to deliver unless we are ahead */
			strmin->last_sequence_delivered = tt;
			/* Left the front Partial one on */
		need_reasm_check = 0;
	/*
	 * now we must deliver things in queue the normal way if any are
	 */
	tt = strmin->last_sequence_delivered + 1;
	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
		if (tt == ctl->sinfo_ssn) {
			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (ctl->on_strm_q) {
					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, ctl, ctl->on_strm_q);
				/* subtract pending on streams */
				asoc->size_on_all_streams -= ctl->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_sequence_delivered = ctl->sinfo_ssn;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
				/* Advance to look for the next in-order message. */
				tt = strmin->last_sequence_delivered + 1;
				/* Its a fragmented message */
				if (ctl->first_frag_seen) {
					/*
					 * Make it so this is next to
					 */
					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
					need_reasm_check = 1;
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * Purge the reassembly state for one (stream, sequence) entry after a
 * FORWARD-TSN has skipped it.  Chunks hanging on the entry's reasm list
 * are freed (for old-protocol unordered data, only chunks at or below
 * cumtsn).  If fragments above cumtsn remain, the control entry is reset
 * and re-primed with its first fragment; otherwise the entry is unlinked
 * from its stream queue and, when not on the read queue, fully freed.
 *
 * @param stream  stream id being flushed
 * @param seq     SSN/MID of the victim message
 * @param ordered nonzero for the ordered queue, 0 for unordered
 * @param old     nonzero when I-DATA is NOT in use (16-bit SSN semantics)
 * @param cumtsn  new cumulative TSN from the FORWARD-TSN chunk
 *
 * NOTE(review): this view of the file is elided; closing braces and some
 * comment delimiters are missing from SOURCE.  All code lines below are
 * kept exactly as visible.
 */
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint32_t seq, int ordered, int old, uint32_t cumtsn)
	struct sctp_queued_to_read *control;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *nchk;
	int cnt_removed = 0;
	/*
	 * For now large messages held on the stream reasm that are complete
	 * will be tossed too. We could in theory do more work to spin
	 * through and stop after dumping one msg aka seeing the start of a
	 * new msg at the head, and call the delivery function... to see if
	 * it can be delivered... But for now we just dump everything on the
	 */
	strm = &asoc->strmin[stream];
	control = sctp_find_reasm_entry(strm, (uint32_t) seq, ordered, old);
	if (control == NULL) {
	/* Free every queued fragment belonging to this entry. */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (old && (ordered == 0)) {
			/* Old-protocol unordered data: keep fragments beyond
			 * the new cumulative TSN. */
			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, cumtsn)) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		sctp_m_freem(chk->data);
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		/* Re-arm the control entry for the surviving fragments. */
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
	/* No fragments left: unlink the entry from its stream queue. */
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
	} else if (control->on_strm_q) {
		/* Unknown queue tag is a logic error. */
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		/* Not visible to the application: free it completely. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		sctp_free_a_readq(stcb, control);
5365 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5366 struct sctp_forward_tsn_chunk *fwd,
5367 int *abort_flag, struct mbuf *m, int offset)
5369 /* The pr-sctp fwd tsn */
5371 * here we will perform all the data receiver side steps for
5372 * processing FwdTSN, as required in by pr-sctp draft:
5374 * Assume we get FwdTSN(x):
5376 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5377 * others we have 3) examine and update re-ordering queue on
5378 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5379 * report where we are.
5381 struct sctp_association *asoc;
5382 uint32_t new_cum_tsn, gap;
5383 unsigned int i, fwd_sz, m_size;
5385 struct sctp_stream_in *strm;
5386 struct sctp_queued_to_read *ctl, *sv;
5389 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5390 SCTPDBG(SCTP_DEBUG_INDATA1,
5391 "Bad size too small/big fwd-tsn\n");
5394 m_size = (stcb->asoc.mapping_array_size << 3);
5395 /*************************************************************/
5396 /* 1. Here we update local cumTSN and shift the bitmap array */
5397 /*************************************************************/
5398 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5400 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5401 /* Already got there ... */
5405 * now we know the new TSN is more advanced, let's find the actual
5408 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5409 asoc->cumulative_tsn = new_cum_tsn;
5410 if (gap >= m_size) {
5411 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5412 struct mbuf *op_err;
5413 char msg[SCTP_DIAG_INFO_LEN];
5416 * out of range (of single byte chunks in the rwnd I
5417 * give out). This must be an attacker.
5420 snprintf(msg, sizeof(msg),
5421 "New cum ack %8.8x too high, highest TSN %8.8x",
5422 new_cum_tsn, asoc->highest_tsn_inside_map);
5423 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5424 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5425 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5428 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5430 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5431 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5432 asoc->highest_tsn_inside_map = new_cum_tsn;
5434 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5435 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5437 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5438 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5441 SCTP_TCB_LOCK_ASSERT(stcb);
5442 for (i = 0; i <= gap; i++) {
5443 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5444 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5445 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5446 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5447 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5452 /*************************************************************/
5453 /* 2. Clear up re-assembly queue */
5454 /*************************************************************/
5456 /* This is now done as part of clearing up the stream/seq */
5457 if (asoc->idata_supported == 0) {
5460 /* Flush all the un-ordered data based on cum-tsn */
5461 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5462 for (sid = 0; sid < asoc->streamincnt; sid++) {
5463 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, 1, new_cum_tsn);
5465 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5467 /*******************************************************/
5468 /* 3. Update the PR-stream re-ordering queues and fix */
5469 /* delivery issues as needed. */
5470 /*******************************************************/
5471 fwd_sz -= sizeof(*fwd);
5474 unsigned int num_str;
5477 uint16_t ordered, flags;
5479 struct sctp_strseq *stseq, strseqbuf;
5480 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5482 offset += sizeof(*fwd);
5484 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5485 if (asoc->idata_supported) {
5486 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5489 num_str = fwd_sz / sizeof(struct sctp_strseq);
5492 for (i = 0; i < num_str; i++) {
5493 if (asoc->idata_supported) {
5494 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5495 sizeof(struct sctp_strseq_mid),
5496 (uint8_t *) & strseqbuf_m);
5497 offset += sizeof(struct sctp_strseq_mid);
5498 if (stseq_m == NULL) {
5501 stream = ntohs(stseq_m->stream);
5502 sequence = ntohl(stseq_m->msg_id);
5503 flags = ntohs(stseq_m->flags);
5504 if (flags & PR_SCTP_UNORDERED_FLAG) {
5510 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5511 sizeof(struct sctp_strseq),
5512 (uint8_t *) & strseqbuf);
5513 offset += sizeof(struct sctp_strseq);
5514 if (stseq == NULL) {
5517 stream = ntohs(stseq->stream);
5518 sequence = (uint32_t) ntohs(stseq->sequence);
5526 * Ok we now look for the stream/seq on the read
5527 * queue where its not all delivered. If we find it
5528 * we transmute the read entry into a PDI_ABORTED.
5530 if (stream >= asoc->streamincnt) {
5531 /* screwed up streams, stop! */
5534 if ((asoc->str_of_pdapi == stream) &&
5535 (asoc->ssn_of_pdapi == sequence)) {
5537 * If this is the one we were partially
5538 * delivering now then we no longer are.
5539 * Note this will change with the reassembly
5542 asoc->fragmented_delivery_inprogress = 0;
5544 strm = &asoc->strmin[stream];
5545 if (asoc->idata_supported == 0) {
5548 for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(1, sequence, strm_at); strm_at++) {
5549 sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
5554 for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(0, sequence, strm_at); strm_at++) {
5555 sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
5558 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5559 if ((ctl->sinfo_stream == stream) &&
5560 (ctl->sinfo_ssn == sequence)) {
5561 str_seq = (stream << 16) | (0x0000ffff & sequence);
5562 ctl->pdapi_aborted = 1;
5563 sv = stcb->asoc.control_pdapi;
5565 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5566 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5567 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5568 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5570 } else if (ctl->on_strm_q) {
5571 panic("strm: %p ctl: %p unknown %d",
5572 strm, ctl, ctl->on_strm_q);
5576 stcb->asoc.control_pdapi = ctl;
5577 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5579 SCTP_PARTIAL_DELIVERY_ABORTED,
5581 SCTP_SO_NOT_LOCKED);
5582 stcb->asoc.control_pdapi = sv;
5584 } else if ((ctl->sinfo_stream == stream) &&
5585 SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5586 /* We are past our victim SSN */
5590 if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5591 /* Update the sequence number */
5592 strm->last_sequence_delivered = sequence;
5594 /* now kick the stream the new way */
5595 /* sa_ignore NO_NULL_CHK */
5596 sctp_kick_prsctp_reorder_queue(stcb, strm);
5598 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5601 * Now slide thing forward.
5603 sctp_slide_mapping_arrays(stcb);