2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
* NOTES: On the outbound side of things I need to check the sack timer to
* see if I should generate a sack into the chunk queue (if I have data to
* send, that is) and will be sending it, for bundling.
* The callback in sctp_usrreq.c will get called when the socket is read from.
* This will cause sctp_service_queues() to get called on the top entry in
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64 struct sctp_stream_in *strm,
65 struct sctp_tcb *stcb,
66 struct sctp_association *asoc,
67 struct sctp_tmit_chunk *chk);
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 /* Calculate what the rwnd would be */
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
83 * This is really set wrong with respect to a 1-2-m socket. Since
84 * the sb_cc is the count that everyone as put up. When we re-write
85 * sctp_soreceive then we will fix this so that ONLY this
86 * associations data is taken into account.
88 if (stcb->sctp_socket == NULL) {
91 if (stcb->asoc.sb_cc == 0 &&
92 asoc->size_on_reasm_queue == 0 &&
93 asoc->size_on_all_streams == 0) {
94 /* Full rwnd granted */
95 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 /* get actual space */
99 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
101 * take out what has NOT been put on socket queue and we yet hold
104 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 asoc->cnt_on_reasm_queue * MSIZE));
106 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 asoc->cnt_on_all_streams * MSIZE));
112 /* what is the overhead of all these rwnd's */
113 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
115 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 * even it is 0. SWS engaged
118 if (calc < stcb->asoc.my_rwnd_control_len) {
127 * Build out our readq entry based on the incoming packet.
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131 struct sctp_nets *net,
132 uint32_t tsn, uint32_t ppid,
133 uint32_t context, uint16_t stream_no,
134 uint32_t stream_seq, uint8_t flags,
137 struct sctp_queued_to_read *read_queue_e = NULL;
139 sctp_alloc_a_readq(stcb, read_queue_e);
140 if (read_queue_e == NULL) {
143 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 read_queue_e->sinfo_stream = stream_no;
145 read_queue_e->sinfo_ssn = stream_seq;
146 read_queue_e->sinfo_flags = (flags << 8);
147 read_queue_e->sinfo_ppid = ppid;
148 read_queue_e->sinfo_context = context;
149 read_queue_e->sinfo_tsn = tsn;
150 read_queue_e->sinfo_cumtsn = tsn;
151 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
152 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 TAILQ_INIT(&read_queue_e->reasm);
154 read_queue_e->whoFrom = net;
155 atomic_add_int(&net->ref_count, 1);
156 read_queue_e->data = dm;
157 read_queue_e->stcb = stcb;
158 read_queue_e->port_from = stcb->rport;
160 return (read_queue_e);
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
166 struct sctp_extrcvinfo *seinfo;
167 struct sctp_sndrcvinfo *outinfo;
168 struct sctp_rcvinfo *rcvinfo;
169 struct sctp_nxtinfo *nxtinfo;
176 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 /* user does not want any ancillary data */
183 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
186 seinfo = (struct sctp_extrcvinfo *)sinfo;
187 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
190 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
194 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
197 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
206 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 SCTP_BUF_LEN(ret) = 0;
213 /* We need a CMSG header followed by the struct */
214 cmh = mtod(ret, struct cmsghdr *);
216 * Make sure that there is no un-initialized padding between the
217 * cmsg header and cmsg data and after the cmsg data.
220 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 cmh->cmsg_level = IPPROTO_SCTP;
222 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 cmh->cmsg_type = SCTP_RCVINFO;
224 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 rcvinfo->rcv_context = sinfo->sinfo_context;
232 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
237 cmh->cmsg_level = IPPROTO_SCTP;
238 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 cmh->cmsg_type = SCTP_NXTINFO;
240 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 nxtinfo->nxt_flags = 0;
243 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 nxtinfo->nxt_flags |= SCTP_UNORDERED;
246 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 nxtinfo->nxt_flags |= SCTP_COMPLETE;
252 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
262 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 cmh->cmsg_type = SCTP_EXTRCV;
264 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 cmh->cmsg_type = SCTP_SNDRCV;
270 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
280 uint32_t gap, i, cumackp1;
282 int in_r = 0, in_nr = 0;
284 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 cumackp1 = asoc->cumulative_tsn + 1;
288 if (SCTP_TSN_GT(cumackp1, tsn)) {
290 * this tsn is behind the cum ack and thus we don't need to
291 * worry about it being moved from one to the other.
295 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 if ((in_r == 0) && (in_nr == 0)) {
300 panic("Things are really messed up now");
302 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 sctp_print_mapping_array(asoc);
307 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 asoc->highest_tsn_inside_nr_map = tsn;
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
324 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t bits, unordered;
338 bits = (control->sinfo_flags >> 8);
339 unordered = bits & SCTP_DATA_UNORDERED;
341 q = &strm->uno_inqueue;
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
345 * Only one stream can be here in old style
350 TAILQ_INSERT_TAIL(q, control, next_instrm);
351 control->on_strm_q = SCTP_ON_UNORDERED;
357 if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
360 if (TAILQ_EMPTY(q)) {
362 TAILQ_INSERT_HEAD(q, control, next_instrm);
364 control->on_strm_q = SCTP_ON_UNORDERED;
366 control->on_strm_q = SCTP_ON_ORDERED;
370 TAILQ_FOREACH(at, q, next_instrm) {
371 if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
373 * one in queue is bigger than the new one,
374 * insert before this one
376 TAILQ_INSERT_BEFORE(at, control, next_instrm);
378 control->on_strm_q = SCTP_ON_UNORDERED;
380 control->on_strm_q = SCTP_ON_ORDERED;
383 } else if (at->msg_id == control->msg_id) {
385 * Gak, He sent me a duplicate msg id
386 * number?? return -1 to abort.
390 if (TAILQ_NEXT(at, next_instrm) == NULL) {
392 * We are at the end, insert it
395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 sctp_log_strm_del(control, at,
397 SCTP_STR_LOG_FROM_INSERT_TL);
399 TAILQ_INSERT_AFTER(q,
400 at, control, next_instrm);
402 control->on_strm_q = SCTP_ON_UNORDERED;
404 control->on_strm_q = SCTP_ON_ORDERED;
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416 struct sctp_stream_in *strm,
417 struct sctp_queued_to_read *control,
418 struct sctp_tmit_chunk *chk,
419 int *abort_flag, int opspot)
421 char msg[SCTP_DIAG_INFO_LEN];
424 if (stcb->asoc.idata_supported) {
425 snprintf(msg, sizeof(msg),
426 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
428 control->fsn_included,
429 chk->rec.data.TSN_seq,
430 chk->rec.data.stream_number,
431 chk->rec.data.fsn_num, chk->rec.data.stream_seq);
433 snprintf(msg, sizeof(msg),
434 "Reass %x, CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x, SSN:%4.4x",
436 control->fsn_included,
437 chk->rec.data.TSN_seq,
438 chk->rec.data.stream_number,
439 chk->rec.data.fsn_num,
440 (uint16_t) chk->rec.data.stream_seq);
442 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
443 sctp_m_freem(chk->data);
445 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
446 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
447 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
452 clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
455 * The control could not be placed and must be cleaned.
457 struct sctp_tmit_chunk *chk, *nchk;
459 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
460 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
462 sctp_m_freem(chk->data);
464 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
466 sctp_free_a_readq(stcb, control);
470 * Queue the chunk either right into the socket buffer if it is the next one
471 * to go OR put it in the correct place in the delivery queue. If we do
472 * append to the so_buf, keep doing so until we are out of order as
473 * long as the control's entered are non-fragmented.
476 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
477 struct sctp_stream_in *strm,
478 struct sctp_association *asoc,
479 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
482 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
483 * all the data in one stream this could happen quite rapidly. One
484 * could use the TSN to keep track of things, but this scheme breaks
485 * down in the other type of stream useage that could occur. Send a
486 * single msg to stream 0, send 4Billion messages to stream 1, now
487 * send a message to stream 0. You have a situation where the TSN
488 * has wrapped but not in the stream. Is this worth worrying about
489 * or should we just change our queue sort at the bottom to be by
492 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
493 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
494 * assignment this could happen... and I don't see how this would be
495 * a violation. So for now I am undecided an will leave the sort by
496 * SSN alone. Maybe a hybred approach is the answer
499 struct sctp_queued_to_read *at;
503 char msg[SCTP_DIAG_INFO_LEN];
505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
506 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
508 if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
509 /* The incoming sseq is behind where we last delivered? */
510 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
511 control->sinfo_ssn, strm->last_sequence_delivered);
514 * throw it in the stream so it gets cleaned up in
515 * association destruction
517 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
518 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
519 strm->last_sequence_delivered, control->sinfo_tsn,
520 control->sinfo_stream, control->sinfo_ssn);
521 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
522 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
523 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
528 if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
532 asoc->size_on_all_streams += control->length;
533 sctp_ucount_incr(asoc->cnt_on_all_streams);
534 nxt_todel = strm->last_sequence_delivered + 1;
535 if (nxt_todel == control->sinfo_ssn) {
536 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
539 so = SCTP_INP_SO(stcb->sctp_ep);
540 atomic_add_int(&stcb->asoc.refcnt, 1);
541 SCTP_TCB_UNLOCK(stcb);
542 SCTP_SOCKET_LOCK(so, 1);
544 atomic_subtract_int(&stcb->asoc.refcnt, 1);
545 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
546 SCTP_SOCKET_UNLOCK(so, 1);
550 /* can be delivered right away? */
551 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
552 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
554 /* EY it wont be queued if it could be delivered directly */
556 asoc->size_on_all_streams -= control->length;
557 sctp_ucount_decr(asoc->cnt_on_all_streams);
558 strm->last_sequence_delivered++;
559 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
560 sctp_add_to_readq(stcb->sctp_ep, stcb,
562 &stcb->sctp_socket->so_rcv, 1,
563 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
564 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
566 nxt_todel = strm->last_sequence_delivered + 1;
567 if ((nxt_todel == control->sinfo_ssn) &&
568 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
569 asoc->size_on_all_streams -= control->length;
570 sctp_ucount_decr(asoc->cnt_on_all_streams);
571 if (control->on_strm_q == SCTP_ON_ORDERED) {
572 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
575 panic("Huh control: %p is on_strm_q: %d",
576 control, control->on_strm_q);
579 control->on_strm_q = 0;
580 strm->last_sequence_delivered++;
582 * We ignore the return of deliver_data here
583 * since we always can hold the chunk on the
584 * d-queue. And we have a finite number that
585 * can be delivered from the strq.
587 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
588 sctp_log_strm_del(control, NULL,
589 SCTP_STR_LOG_FROM_IMMED_DEL);
591 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
592 sctp_add_to_readq(stcb->sctp_ep, stcb,
594 &stcb->sctp_socket->so_rcv, 1,
595 SCTP_READ_LOCK_NOT_HELD,
598 } else if (nxt_todel == control->sinfo_ssn) {
603 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
604 SCTP_SOCKET_UNLOCK(so, 1);
609 * Ok, we did not deliver this guy, find the correct place
610 * to put it on the queue.
612 if (sctp_place_control_in_stream(strm, asoc, control)) {
613 char msg[SCTP_DIAG_INFO_LEN];
616 snprintf(msg, sizeof(msg),
617 "Queue to str msg_id: %u duplicate",
619 clean_up_control(stcb, control);
620 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
621 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
622 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
630 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
632 struct mbuf *m, *prev = NULL;
633 struct sctp_tcb *stcb;
635 stcb = control->stcb;
636 control->held_length = 0;
640 if (SCTP_BUF_LEN(m) == 0) {
641 /* Skip mbufs with NO length */
644 control->data = sctp_m_free(m);
647 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
648 m = SCTP_BUF_NEXT(prev);
651 control->tail_mbuf = prev;
656 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
657 if (control->on_read_q) {
659 * On read queue so we must increment the SB stuff,
660 * we assume caller has done any locks of SB.
662 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
664 m = SCTP_BUF_NEXT(m);
667 control->tail_mbuf = prev;
672 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
674 struct mbuf *prev = NULL;
675 struct sctp_tcb *stcb;
677 stcb = control->stcb;
680 panic("Control broken");
685 if (control->tail_mbuf == NULL) {
688 sctp_setup_tail_pointer(control);
691 control->tail_mbuf->m_next = m;
693 if (SCTP_BUF_LEN(m) == 0) {
694 /* Skip mbufs with NO length */
697 control->tail_mbuf->m_next = sctp_m_free(m);
698 m = control->tail_mbuf->m_next;
700 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
701 m = SCTP_BUF_NEXT(prev);
704 control->tail_mbuf = prev;
709 if (control->on_read_q) {
711 * On read queue so we must increment the SB stuff,
712 * we assume caller has done any locks of SB.
714 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
716 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
717 m = SCTP_BUF_NEXT(m);
720 control->tail_mbuf = prev;
725 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
727 memset(nc, 0, sizeof(struct sctp_queued_to_read));
728 nc->sinfo_stream = control->sinfo_stream;
729 nc->sinfo_ssn = control->sinfo_ssn;
730 TAILQ_INIT(&nc->reasm);
731 nc->top_fsn = control->top_fsn;
732 nc->msg_id = control->msg_id;
733 nc->sinfo_flags = control->sinfo_flags;
734 nc->sinfo_ppid = control->sinfo_ppid;
735 nc->sinfo_context = control->sinfo_context;
736 nc->fsn_included = 0xffffffff;
737 nc->sinfo_tsn = control->sinfo_tsn;
738 nc->sinfo_cumtsn = control->sinfo_cumtsn;
739 nc->sinfo_assoc_id = control->sinfo_assoc_id;
740 nc->whoFrom = control->whoFrom;
741 atomic_add_int(&nc->whoFrom->ref_count, 1);
742 nc->stcb = control->stcb;
743 nc->port_from = control->port_from;
747 sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
748 struct sctp_queued_to_read *control, uint32_t pd_point)
751 * Special handling for the old un-ordered data chunk. All the
752 * chunks/TSN's go to msg_id 0. So we have to do the old style
753 * watching to see if we have it all. If you return one, no other
754 * control entries on the un-ordered queue will be looked at. In
755 * theory there should be no others entries in reality, unless the
756 * guy is sending both unordered NDATA and unordered DATA...
758 struct sctp_tmit_chunk *chk, *lchk, *tchk;
760 struct sctp_queued_to_read *nc = NULL;
763 if (control->first_frag_seen == 0) {
764 /* Nothing we can do, we have not seen the first piece yet */
767 /* Collapse any we can */
770 fsn = control->fsn_included + 1;
771 /* Now what can we add? */
772 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
773 if (chk->rec.data.fsn_num == fsn) {
775 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
776 sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
780 if (control->end_added) {
782 if (!TAILQ_EMPTY(&control->reasm)) {
784 * Ok we have to move anything left
785 * on the control queue to a new
788 sctp_alloc_a_readq(stcb, nc);
789 sctp_build_readq_entry_from_ctl(nc, control);
790 tchk = TAILQ_FIRST(&control->reasm);
791 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
792 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
793 nc->first_frag_seen = 1;
794 nc->fsn_included = tchk->rec.data.fsn_num;
795 nc->data = tchk->data;
796 sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
798 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
799 sctp_setup_tail_pointer(nc);
800 tchk = TAILQ_FIRST(&control->reasm);
802 /* Spin the rest onto the queue */
804 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
805 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
806 tchk = TAILQ_FIRST(&control->reasm);
809 * Now lets add it to the queue
810 * after removing control
812 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
813 nc->on_strm_q = SCTP_ON_UNORDERED;
814 if (control->on_strm_q) {
815 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
816 control->on_strm_q = 0;
819 if (control->on_read_q == 0) {
820 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
821 &stcb->sctp_socket->so_rcv, control->end_added,
822 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
824 if (control->pdapi_started) {
825 strm->pd_api_started = 0;
826 control->pdapi_started = 0;
828 if (control->on_strm_q) {
829 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
830 control->on_strm_q = 0;
832 sctp_wakeup_the_read_socket(stcb->sctp_ep);
833 if ((nc) && (nc->first_frag_seen)) {
835 * Switch to the new guy and
849 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
850 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
851 &stcb->sctp_socket->so_rcv, control->end_added,
852 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
853 strm->pd_api_started = 1;
854 control->pdapi_started = 1;
855 sctp_wakeup_the_read_socket(stcb->sctp_ep);
863 sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
864 struct sctp_stream_in *strm,
865 struct sctp_queued_to_read *control,
866 struct sctp_tmit_chunk *chk,
869 struct sctp_tmit_chunk *at;
873 * Here we need to place the chunk into the control structure sorted
874 * in the correct order.
876 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
877 /* Its the very first one. */
878 SCTPDBG(SCTP_DEBUG_XXX,
879 "chunk is a first fsn: %u becomes fsn_included\n",
880 chk->rec.data.fsn_num);
881 if (control->first_frag_seen) {
883 * In old un-ordered we can reassembly on one
884 * control multiple messages. As long as the next
885 * FIRST is greater then the old first (TSN i.e. FSN
891 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
893 * Easy way the start of a new guy beyond
898 if ((chk->rec.data.fsn_num == control->fsn_included) ||
899 (control->pdapi_started)) {
901 * Ok this should not happen, if it does we
902 * started the pd-api on the higher TSN
903 * (since the equals part is a TSN failure
906 * We are completly hosed in that case since I
907 * have no way to recover. This really will
908 * only happen if we can get more TSN's
909 * higher before the pd-api-point.
911 sctp_abort_in_reasm(stcb, strm, control, chk,
913 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
918 * Ok we have two firsts and the one we just got is
919 * smaller than the one we previously placed.. yuck!
920 * We must swap them out.
923 tdata = control->data;
924 control->data = chk->data;
926 /* Swap the lengths */
927 tmp = control->length;
928 control->length = chk->send_size;
929 chk->send_size = tmp;
930 /* Fix the FSN included */
931 tmp = control->fsn_included;
932 control->fsn_included = chk->rec.data.fsn_num;
933 chk->rec.data.fsn_num = tmp;
936 control->first_frag_seen = 1;
937 control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
938 control->data = chk->data;
939 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
941 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
942 sctp_setup_tail_pointer(control);
946 if (TAILQ_EMPTY(&control->reasm)) {
947 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
948 asoc->size_on_reasm_queue += chk->send_size;
949 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
952 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
953 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
955 * This one in queue is bigger than the new one,
956 * insert the new one before at.
958 asoc->size_on_reasm_queue += chk->send_size;
959 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
961 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
963 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
965 * They sent a duplicate fsn number. This really
966 * should not happen since the FSN is a TSN and it
967 * should have been dropped earlier.
970 sctp_m_freem(chk->data);
973 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
974 sctp_abort_in_reasm(stcb, strm, control, chk,
976 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
982 asoc->size_on_reasm_queue += chk->send_size;
983 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
984 control->top_fsn = chk->rec.data.fsn_num;
985 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
990 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
993 * Given a stream, strm, see if any of the SSN's on it that are
994 * fragmented are ready to deliver. If so go ahead and place them on
995 * the read queue. In so placing if we have hit the end, then we
996 * need to remove them from the stream's queue.
998 struct sctp_queued_to_read *control, *nctl = NULL;
999 uint32_t next_to_del;
1003 if (stcb->sctp_socket) {
1004 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1005 stcb->sctp_ep->partial_delivery_point);
1007 pd_point = stcb->sctp_ep->partial_delivery_point;
1009 control = TAILQ_FIRST(&strm->uno_inqueue);
1011 (asoc->idata_supported == 0)) {
1012 /* Special handling needed for "old" data format */
1013 nctl = TAILQ_NEXT(control, next_instrm);
1014 if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
1018 if (strm->pd_api_started) {
1019 /* Can't add more */
1023 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1024 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1025 nctl = TAILQ_NEXT(control, next_instrm);
1026 if (control->end_added) {
1027 /* We just put the last bit on */
1028 if (control->on_strm_q) {
1030 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1031 panic("Huh control: %p on_q: %d -- not unordered?",
1032 control, control->on_strm_q);
1035 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1036 control->on_strm_q = 0;
1038 if (control->on_read_q == 0) {
1039 sctp_add_to_readq(stcb->sctp_ep, stcb,
1041 &stcb->sctp_socket->so_rcv, control->end_added,
1042 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1045 /* Can we do a PD-API for this un-ordered guy? */
1046 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1047 strm->pd_api_started = 1;
1048 control->pdapi_started = 1;
1049 sctp_add_to_readq(stcb->sctp_ep, stcb,
1051 &stcb->sctp_socket->so_rcv, control->end_added,
1052 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1060 control = TAILQ_FIRST(&strm->inqueue);
1061 if (strm->pd_api_started) {
1062 /* Can't add more */
1065 if (control == NULL) {
1068 if (strm->last_sequence_delivered == control->sinfo_ssn) {
1070 * Ok the guy at the top was being partially delivered
1071 * completed, so we remove it. Note the pd_api flag was
1072 * taken off when the chunk was merged on in
1073 * sctp_queue_data_for_reasm below.
1075 nctl = TAILQ_NEXT(control, next_instrm);
1076 SCTPDBG(SCTP_DEBUG_XXX,
1077 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1078 control, control->end_added, control->sinfo_ssn,
1079 control->top_fsn, control->fsn_included,
1080 strm->last_sequence_delivered);
1081 if (control->end_added) {
1082 if (control->on_strm_q) {
1084 if (control->on_strm_q != SCTP_ON_ORDERED) {
1085 panic("Huh control: %p on_q: %d -- not ordered?",
1086 control, control->on_strm_q);
1089 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1090 control->on_strm_q = 0;
1092 if (control->on_read_q == 0) {
1093 sctp_add_to_readq(stcb->sctp_ep, stcb,
1095 &stcb->sctp_socket->so_rcv, control->end_added,
1096 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1098 if (strm->pd_api_started && control->pdapi_started) {
1099 control->pdapi_started = 0;
1100 strm->pd_api_started = 0;
1105 if (strm->pd_api_started) {
1107 * Can't add more must have gotten an un-ordered above being
1108 * partially delivered.
1113 next_to_del = strm->last_sequence_delivered + 1;
1115 SCTPDBG(SCTP_DEBUG_XXX,
1116 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1117 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1119 nctl = TAILQ_NEXT(control, next_instrm);
1120 if ((control->sinfo_ssn == next_to_del) &&
1121 (control->first_frag_seen)) {
1122 /* Ok we can deliver it onto the stream. */
1123 if (control->end_added) {
1124 /* We are done with it afterwards */
1125 if (control->on_strm_q) {
1127 if (control->on_strm_q != SCTP_ON_ORDERED) {
1128 panic("Huh control: %p on_q: %d -- not ordered?",
1129 control, control->on_strm_q);
1132 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1133 control->on_strm_q = 0;
1137 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1139 * A singleton now slipping through - mark
1140 * it non-revokable too
1142 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1143 } else if (control->end_added == 0) {
1145 * Check if we can defer adding until its
1148 if ((control->length < pd_point) || (strm->pd_api_started)) {
1150 * Don't need it or cannot add more
1151 * (one being delivered that way)
1156 if (control->on_read_q == 0) {
1157 sctp_add_to_readq(stcb->sctp_ep, stcb,
1159 &stcb->sctp_socket->so_rcv, control->end_added,
1160 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1162 strm->last_sequence_delivered = next_to_del;
1163 if ((control->end_added) && (control->last_frag_seen)) {
1167 /* We are now doing PD API */
1168 strm->pd_api_started = 1;
1169 control->pdapi_started = 1;
/*
 * sctp_add_chk_to_control() - merge the data of one reassembly chunk (chk)
 * into its message holder (control) and free the chunk shell.  Updates the
 * reassembly-queue accounting, marks the fragment's TSN non-revokable, and,
 * when the LAST-fragment flag is seen, finishes any partial-delivery state
 * and unlinks the control from its stream queue.
 */
1178 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1179 struct sctp_stream_in *strm,
1180 struct sctp_tcb *stcb, struct sctp_association *asoc,
1181 struct sctp_tmit_chunk *chk)
1184 * Given a control and a chunk, merge the data from the chk onto the
1185 * control and free up the chunk resources.
/*
 * If the control is already on the read queue a reader may touch it
 * concurrently (it is being partially delivered), so the merge is
 * bracketed by the endpoint's INP read lock (released at line 1236).
 */
1189 if (control->on_read_q) {
1191 * Its being pd-api'd so we must do some locks.
1193 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First fragment seeds control->data; later fragments append at the tail. */
1196 if (control->data == NULL) {
1197 control->data = chk->data;
1198 sctp_setup_tail_pointer(control);
1200 sctp_add_to_tail_pointer(control, chk->data);
/*
 * Fragment is merged: advance the included FSN, move its bytes/count off
 * the reassembly-queue accounting, and make its TSN non-revokable.
 */
1202 control->fsn_included = chk->rec.data.fsn_num;
1203 asoc->size_on_reasm_queue -= chk->send_size;
1204 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1205 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1207 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1208 control->first_frag_seen = 1;
1210 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Last fragment: if the message is both queued on the stream and visible
 * to the reader, end any partial-delivery session and unlink it from the
 * ordered or unordered stream queue.
 */
1212 if ((control->on_strm_q) && (control->on_read_q)) {
1213 if (control->pdapi_started) {
1214 control->pdapi_started = 0;
1215 strm->pd_api_started = 0;
1217 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1219 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1220 control->on_strm_q = 0;
1221 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1223 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1224 control->on_strm_q = 0;
/* Any other non-zero queue tag is a state-machine bug. */
1226 } else if (control->on_strm_q) {
1227 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1228 control->on_strm_q);
/* Message is now complete end-to-end. */
1232 control->end_added = 1;
1233 control->last_frag_seen = 1;
1236 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* The data mbufs were moved into control; free only the chunk descriptor. */
1238 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1242 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1243 * queue, see if anything can be delivered. If so pull it off (or as much as
1244 * we can). If we run out of space then we must dump what we can and set the
1245 * appropriate flag to say we queued what we could.
/*
 * sctp_queue_data_for_reasm() - insert chunk chk into the reassembly state of
 * its message (control).  FIRST fragments seed the control directly; other
 * fragments are placed FSN-ordered on control->reasm.  Protocol violations
 * (duplicate FIRST, duplicate FSN, a second LAST, an FSN at/before what was
 * already delivered or beyond a seen top FSN) abort the association via
 * sctp_abort_in_reasm().  Finally, fragments that became in-sequence are
 * folded into the control and the reader is woken if anything was added.
 */
1248 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1249 struct sctp_stream_in *strm,
1250 struct sctp_queued_to_read *control,
1251 struct sctp_tmit_chunk *chk,
1252 int created_control,
1253 int *abort_flag, uint32_t tsn)
1256 struct sctp_tmit_chunk *at, *nat;
1257 int cnt_added, unordered;
1260 * For old un-ordered data chunks.
1262 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1267 /* Must be added to the stream-in queue */
1268 if (created_control) {
/*
 * A freshly created control must be linked into the stream; failure
 * here means an entry with the same SSN already exists.
 */
1269 if (sctp_place_control_in_stream(strm, asoc, control)) {
1270 /* Duplicate SSN? */
1271 clean_up_control(stcb, control);
1272 sctp_abort_in_reasm(stcb, strm, control, chk,
1274 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
/*
 * Old DATA only: a fragment at cum-ack+1 without the B bit can never
 * complete a message, so it is invalid.
 */
1277 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1279 * Ok we created this control and now lets validate
1280 * that its legal i.e. there is a B bit set, if not
1281 * and we have up to the cum-ack then its invalid.
1283 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1284 sctp_abort_in_reasm(stcb, strm, control, chk,
1286 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old-style (pre I-DATA) unordered fragments take a dedicated path. */
1291 if ((asoc->idata_supported == 0) && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1292 sctp_inject_old_data_unordered(stcb, asoc, strm, control, chk, abort_flag);
1296 * Ok we must queue the chunk into the reassembly portion: o if its
1297 * the first it goes to the control mbuf. o if its not first but the
1298 * next in sequence it goes to the control, and each succeeding one
1299 * in order also goes. o if its not in order we place it on the list
1302 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1303 /* Its the very first one. */
1304 SCTPDBG(SCTP_DEBUG_XXX,
1305 "chunk is a first fsn: %u becomes fsn_included\n",
1306 chk->rec.data.fsn_num);
1307 if (control->first_frag_seen) {
1309 * Error on senders part, they either sent us two
1310 * data chunks with FIRST, or they sent two
1311 * un-ordered chunks that were fragmented at the
1312 * same time in the same stream.
1314 sctp_abort_in_reasm(stcb, strm, control, chk,
1316 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* FIRST fragment seeds the control's data directly; the chunk shell
 * is freed after its mbufs are taken over. */
1319 control->first_frag_seen = 1;
1320 control->fsn_included = chk->rec.data.fsn_num;
1321 control->data = chk->data;
1322 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1324 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1325 sctp_setup_tail_pointer(control);
1327 /* Place the chunk in our list */
1330 if (control->last_frag_seen == 0) {
1331 /* Still willing to raise highest FSN seen */
1332 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1333 SCTPDBG(SCTP_DEBUG_XXX,
1334 "We have a new top_fsn: %u\n",
1335 chk->rec.data.fsn_num);
1336 control->top_fsn = chk->rec.data.fsn_num;
1338 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1339 SCTPDBG(SCTP_DEBUG_XXX,
1340 "The last fsn is now in place fsn: %u\n",
1341 chk->rec.data.fsn_num);
1342 control->last_frag_seen = 1;
1344 if (asoc->idata_supported || control->first_frag_seen) {
1346 * For IDATA we always check since we know
1347 * that the first fragment is 0. For old
1348 * DATA we have to receive the first before
1349 * we know the first FSN (which is the TSN).
1351 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1353 * We have already delivered up to
1356 sctp_abort_in_reasm(stcb, strm, control, chk,
1358 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/*
 * LAST fragment already seen: a second LAST, an already-delivered FSN,
 * or an FSN beyond the recorded top is a peer error.
 */
1363 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1364 /* Second last? huh? */
1365 SCTPDBG(SCTP_DEBUG_XXX,
1366 "Duplicate last fsn: %u (top: %u) -- abort\n",
1367 chk->rec.data.fsn_num, control->top_fsn);
1368 sctp_abort_in_reasm(stcb, strm, control,
1370 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1373 if (asoc->idata_supported || control->first_frag_seen) {
1375 * For IDATA we always check since we know
1376 * that the first fragment is 0. For old
1377 * DATA we have to receive the first before
1378 * we know the first FSN (which is the TSN).
1381 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1383 * We have already delivered up to
1386 SCTPDBG(SCTP_DEBUG_XXX,
1387 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1388 chk->rec.data.fsn_num, control->fsn_included);
1389 sctp_abort_in_reasm(stcb, strm, control, chk,
1391 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1396 * validate not beyond top FSN if we have seen last
1399 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1400 SCTPDBG(SCTP_DEBUG_XXX,
1401 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1402 chk->rec.data.fsn_num,
1404 sctp_abort_in_reasm(stcb, strm, control, chk,
1406 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1411 * If we reach here, we need to place the new chunk in the
1412 * reassembly for this control.
1414 SCTPDBG(SCTP_DEBUG_XXX,
1415 "chunk is a not first fsn: %u needs to be inserted\n",
1416 chk->rec.data.fsn_num);
/*
 * Walk the FSN-sorted reasm list: insert before the first larger FSN,
 * abort the association on an exact duplicate FSN.
 */
1417 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1418 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1420 * This one in queue is bigger than the new
1421 * one, insert the new one before at.
1423 SCTPDBG(SCTP_DEBUG_XXX,
1424 "Insert it before fsn: %u\n",
1425 at->rec.data.fsn_num);
1426 asoc->size_on_reasm_queue += chk->send_size;
1427 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1428 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1431 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1433 * Gak, He sent me a duplicate str seq
1437 * foo bar, I guess I will just free this
1438 * new guy, should we abort too? FIX ME
1439 * MAYBE? Or it COULD be that the SSN's have
1440 * wrapped. Maybe I should compare to TSN
1441 * somehow... sigh for now just blow away
1444 SCTPDBG(SCTP_DEBUG_XXX,
1445 "Duplicate to fsn: %u -- abort\n",
1446 at->rec.data.fsn_num);
1447 sctp_abort_in_reasm(stcb, strm, control,
1449 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1453 if (inserted == 0) {
1454 /* Goes on the end */
1455 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1456 chk->rec.data.fsn_num);
1457 asoc->size_on_reasm_queue += chk->send_size;
1458 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1459 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1463 * Ok lets see if we can suck any up into the control structure that
1464 * are in seq if it makes sense.
1468 * If the first fragment has not been seen there is no sense in
1471 if (control->first_frag_seen) {
1472 next_fsn = control->fsn_included + 1;
1473 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1474 if (at->rec.data.fsn_num == next_fsn) {
1475 /* We can add this one now to the control */
1476 SCTPDBG(SCTP_DEBUG_XXX,
1477 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1479 at->rec.data.fsn_num,
1480 next_fsn, control->fsn_included);
1481 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1482 sctp_add_chk_to_control(control, strm, stcb, asoc, at);
/*
 * Message completed while a PD-API delivery was in progress:
 * close out the PD state and, if the control is not already
 * visible to the reader, hand the whole message to the read queue.
 */
1485 if (control->end_added && control->pdapi_started) {
1486 if (strm->pd_api_started) {
1487 strm->pd_api_started = 0;
1488 control->pdapi_started = 0;
1490 if (control->on_read_q == 0) {
1491 sctp_add_to_readq(stcb->sctp_ep, stcb,
1493 &stcb->sctp_socket->so_rcv, control->end_added,
1494 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1503 if ((control->on_read_q) && (cnt_added > 0)) {
1504 /* Need to wakeup the reader */
1505 sctp_wakeup_the_read_socket(stcb->sctp_ep);
/*
 * find_reasm_entry() - look up the in-progress reassembly entry for msg_id on
 * stream strm: the ordered inqueue for ordered data, otherwise the unordered
 * uno_inqueue.  Returns the matching entry, or (presumably) NULL when none
 * exists -- the return paths are on elided lines, confirm against the file.
 * NOTE(review): in one unordered branch only the head of uno_inqueue is taken
 * (line 1522); the conditional that selects it ("old" pre-I-DATA handling?)
 * is elided here -- verify before relying on that.
 */
1509 static struct sctp_queued_to_read *
1510 find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1512 struct sctp_queued_to_read *reasm;
1515 TAILQ_FOREACH(reasm, &strm->inqueue, next_instrm) {
1516 if (reasm->msg_id == msg_id) {
1522 reasm = TAILQ_FIRST(&strm->uno_inqueue);
1525 TAILQ_FOREACH(reasm, &strm->uno_inqueue, next_instrm) {
1526 if (reasm->msg_id == msg_id) {
/*
 * sctp_process_a_data_chunk() - validate and enqueue one received DATA or
 * I-DATA chunk.  Parses the header (I-DATA carries explicit msg_id/FSN, old
 * DATA derives msg_id from the 16-bit SSN), rejects duplicates, empty chunks
 * and invalid stream ids, enforces rwnd and queue limits, then either
 * express-delivers a complete in-order message straight to the socket buffer
 * or hands the chunk to the stream/reassembly queues.  Finally updates the
 * cumulative TSN and processes any pending stream-reset backlog.
 * Sets *abort_flag when the association is aborted, *break_flag when the
 * caller should stop processing, and raises *high_tsn as needed.
 */
1536 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1537 struct mbuf **m, int offset, int chk_length,
1538 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1539 int *break_flag, int last_chunk, uint8_t chtype)
1541 /* Process a data chunk */
1542 /* struct sctp_tmit_chunk *chk; */
1543 struct sctp_data_chunk *ch;
1544 struct sctp_idata_chunk *nch, chunk_buf;
1545 struct sctp_tmit_chunk *chk;
1546 uint32_t tsn, fsn, gap, msg_id;
1549 int need_reasm_check = 0;
1551 struct mbuf *op_err;
1552 char msg[SCTP_DIAG_INFO_LEN];
1553 struct sctp_queued_to_read *control = NULL;
1554 uint32_t protocol_id;
1555 uint8_t chunk_flags;
1556 struct sctp_stream_reset_list *liste;
1557 struct sctp_stream_in *strm;
1560 int created_control = 0;
/* Parse the chunk header according to its type (I-DATA vs. old DATA). */
1564 if (chtype == SCTP_IDATA) {
1565 nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1566 sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1568 ch = (struct sctp_data_chunk *)nch;
1569 clen = sizeof(struct sctp_idata_chunk);
1570 tsn = ntohl(ch->dp.tsn);
1571 msg_id = ntohl(nch->dp.msg_id);
1572 if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1575 fsn = ntohl(nch->dp.ppid_fsn.fsn);
1578 ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1579 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1581 tsn = ntohl(ch->dp.tsn);
1582 clen = sizeof(struct sctp_data_chunk);
1584 msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1588 chunk_flags = ch->ch.chunk_flags;
/* A chunk exactly the size of its header carries no user data -> abort. */
1589 if ((size_t)chk_length == clen) {
1591 * Need to send an abort since we had a empty data chunk.
1593 struct mbuf *op_err;
1595 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1596 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1597 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1601 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1602 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1603 asoc->send_sack = 1;
1605 protocol_id = ch->dp.protocol_id;
1606 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1608 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1613 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* TSN at or below cum-ack: record it as a duplicate for the next SACK. */
1614 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1615 /* It is a duplicate */
1616 SCTP_STAT_INCR(sctps_recvdupdata);
1617 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1618 /* Record a dup for the next outbound sack */
1619 asoc->dup_tsns[asoc->numduptsns] = tsn;
1622 asoc->send_sack = 1;
1625 /* Calculate the number of TSN's between the base and this TSN */
1626 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1627 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1628 /* Can't hold the bit in the mapping at max array, toss it */
1631 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1632 SCTP_TCB_LOCK_ASSERT(stcb);
1633 if (sctp_expand_mapping_array(asoc, gap)) {
1634 /* Can't expand, drop it */
1638 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1641 /* See if we have received this one already */
1642 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1643 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1644 SCTP_STAT_INCR(sctps_recvdupdata);
1645 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1646 /* Record a dup for the next outbound sack */
1647 asoc->dup_tsns[asoc->numduptsns] = tsn;
1650 asoc->send_sack = 1;
1654 * Check to see about the GONE flag, duplicates would cause a sack
1655 * to be sent up above
1657 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1658 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1659 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1661 * wait a minute, this guy is gone, there is no longer a
1662 * receiver. Send peer an ABORT!
1664 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1665 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1670 * Now before going further we see if there is room. If NOT then we
1671 * MAY let one through only IF this TSN is the one we are waiting
1672 * for on a partial delivery API.
1675 /* Is the stream valid? */
1676 strmno = ntohs(ch->dp.stream_id);
/* Stream id out of range: queue an INVALID_STREAM error, but still mark
 * the TSN received so the peer does not retransmit it forever. */
1678 if (strmno >= asoc->streamincnt) {
1679 struct sctp_error_invalid_stream *cause;
1681 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1682 0, M_NOWAIT, 1, MT_DATA);
1683 if (op_err != NULL) {
1684 /* add some space up front so prepend will work well */
1685 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1686 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1688 * Error causes are just param's and this one has
1689 * two back to back phdr, one with the error type
1690 * and size, the other with the streamid and a rsvd
1692 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1693 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1694 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1695 cause->stream_id = ch->dp.stream_id;
1696 cause->reserved = htons(0);
1697 sctp_queue_op_err(stcb, op_err);
1699 SCTP_STAT_INCR(sctps_badsid);
1700 SCTP_TCB_LOCK_ASSERT(stcb);
1701 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1702 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1703 asoc->highest_tsn_inside_nr_map = tsn;
1705 if (tsn == (asoc->cumulative_tsn + 1)) {
1706 /* Update cum-ack */
1707 asoc->cumulative_tsn = tsn;
1711 strm = &asoc->strmin[strmno];
1713 * If its a fragmented message, lets see if we can find the control
1714 * on the reassembly queues.
1716 if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1718 * The first *must* be fsn 0, and other (middle/end) pieces
1719 * can *not* be fsn 0.
1723 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1724 /* See if we can find the re-assembly entity */
1725 control = find_reasm_entry(strm, msg_id, ordered, old_data);
1726 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1727 chunk_flags, control);
/* Sanity-check that the found entry matches this chunk's ordering/SSN;
 * a mismatch is a protocol violation and aborts the association. */
1729 /* We found something, does it belong? */
1730 if (ordered && (msg_id != control->sinfo_ssn)) {
1732 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1733 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1734 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1738 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1740 * We can't have a switched order with an
1745 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1747 * We can't have a switched unordered with a
1755 * Its a complete segment. Lets validate we don't have a
1756 * re-assembly going on with the same Stream/Seq (for
1757 * ordered) or in the same Stream for unordered.
1759 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1761 if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
1762 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1769 /* now do the tests */
/* Resource limits: too many queued chunks or a closed rwnd.  A full
 * socket buffer means the reader is lagging -- wake it up. */
1770 if (((asoc->cnt_on_all_streams +
1771 asoc->cnt_on_reasm_queue +
1772 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1773 (((int)asoc->my_rwnd) <= 0)) {
1775 * When we have NO room in the rwnd we check to make sure
1776 * the reader is doing its job...
1778 if (stcb->sctp_socket->so_rcv.sb_cc) {
1779 /* some to read, wake-up */
1780 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1783 so = SCTP_INP_SO(stcb->sctp_ep);
1784 atomic_add_int(&stcb->asoc.refcnt, 1);
1785 SCTP_TCB_UNLOCK(stcb);
1786 SCTP_SOCKET_LOCK(so, 1);
1787 SCTP_TCB_LOCK(stcb);
1788 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1789 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1790 /* assoc was freed while we were unlocked */
1791 SCTP_SOCKET_UNLOCK(so, 1);
1795 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1796 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1797 SCTP_SOCKET_UNLOCK(so, 1);
1800 /* now is it in the mapping array of what we have accepted? */
1802 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1803 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1804 /* Nope not in the valid range dump it */
1806 sctp_set_rwnd(stcb, asoc);
1807 if ((asoc->cnt_on_all_streams +
1808 asoc->cnt_on_reasm_queue +
1809 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1810 SCTP_STAT_INCR(sctps_datadropchklmt);
1812 SCTP_STAT_INCR(sctps_datadroprwnd);
1818 if (control == NULL) {
1821 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1826 #ifdef SCTP_ASOCLOG_OF_TSNS
1827 SCTP_TCB_LOCK_ASSERT(stcb);
1828 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1829 asoc->tsn_in_at = 0;
1830 asoc->tsn_in_wrapped = 1;
1832 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1833 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1834 asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1835 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1836 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1837 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1838 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1839 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1843 * Before we continue lets validate that we are not being fooled by
1844 * an evil attacker. We can only have Nk chunks based on our TSN
1845 * spread allowed by the mapping array N * 8 bits, so there is no
1846 * way our stream sequence numbers could have wrapped. We of course
1847 * only validate the FIRST fragment so the bit must be set.
1849 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1850 (TAILQ_EMPTY(&asoc->resetHead)) &&
1851 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1852 SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1853 /* The incoming sseq is behind where we last delivered? */
1854 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1855 msg_id, asoc->strmin[strmno].last_sequence_delivered);
1857 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1858 asoc->strmin[strmno].last_sequence_delivered,
1859 tsn, strmno, msg_id);
1860 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1861 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1862 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1866 /************************************
1867 * From here down we may find ch-> invalid
1868 * so its a good idea NOT to use it.
1869 *************************************/
/* Detach (copy or steal) the user data from the inbound packet mbufs. */
1871 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1873 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1875 if (last_chunk == 0) {
1877 dmbuf = SCTP_M_COPYM(*m,
1878 (offset + sizeof(struct sctp_idata_chunk)),
1881 dmbuf = SCTP_M_COPYM(*m,
1882 (offset + sizeof(struct sctp_data_chunk)),
1885 #ifdef SCTP_MBUF_LOGGING
1886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1887 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1891 /* We can steal the last chunk */
1895 /* lop off the top part */
1897 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1899 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1901 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1902 l_len = SCTP_BUF_LEN(dmbuf);
1905 * need to count up the size hopefully does not hit
1911 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1912 l_len += SCTP_BUF_LEN(lat);
1915 if (l_len > the_len) {
1916 /* Trim the end round bytes off too */
1917 m_adj(dmbuf, -(l_len - the_len));
1920 if (dmbuf == NULL) {
1921 SCTP_STAT_INCR(sctps_nomem);
1925 * Now no matter what we need a control, get one if we don't have
1926 * one (we may have gotten it above when we found the message was
1929 if (control == NULL) {
1930 sctp_alloc_a_readq(stcb, control);
1931 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1936 if (control == NULL) {
1937 SCTP_STAT_INCR(sctps_nomem);
1940 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1941 control->data = dmbuf;
1942 control->tail_mbuf = NULL;
1943 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1944 control->top_fsn = control->fsn_included = fsn;
1946 created_control = 1;
1948 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1949 chunk_flags, ordered, msg_id, control);
/* Express delivery: an unfragmented message that is unordered, or ordered
 * and exactly next in sequence with an empty stream queue, can bypass the
 * stream machinery and go straight to the socket buffer. */
1950 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1951 TAILQ_EMPTY(&asoc->resetHead) &&
1953 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1954 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1955 /* Candidate for express delivery */
1957 * Its not fragmented, No PD-API is up, Nothing in the
1958 * delivery queue, Its un-ordered OR ordered and the next to
1959 * deliver AND nothing else is stuck on the stream queue,
1960 * And there is room for it in the socket buffer. Lets just
1961 * stuff it up the buffer....
1963 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1964 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1965 asoc->highest_tsn_inside_nr_map = tsn;
1967 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
1970 sctp_add_to_readq(stcb->sctp_ep, stcb,
1971 control, &stcb->sctp_socket->so_rcv,
1972 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1974 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1975 /* for ordered, bump what we delivered */
1976 strm->last_sequence_delivered++;
1978 SCTP_STAT_INCR(sctps_recvexpress);
1979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1980 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
1981 SCTP_STR_LOG_FROM_EXPRS_DEL);
1984 goto finish_express_del;
1986 /* Now will we need a chunk too? */
/* Fragmented message: allocate a chunk descriptor for the reassembly
 * machinery and copy the chunk's metadata into it. */
1987 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1988 sctp_alloc_a_chunk(stcb, chk);
1990 /* No memory so we drop the chunk */
1991 SCTP_STAT_INCR(sctps_nomem);
1992 if (last_chunk == 0) {
1993 /* we copied it, free the copy */
1994 sctp_m_freem(dmbuf);
1998 chk->rec.data.TSN_seq = tsn;
1999 chk->no_fr_allowed = 0;
2000 chk->rec.data.fsn_num = fsn;
2001 chk->rec.data.stream_seq = msg_id;
2002 chk->rec.data.stream_number = strmno;
2003 chk->rec.data.payloadtype = protocol_id;
2004 chk->rec.data.context = stcb->asoc.context;
2005 chk->rec.data.doing_fast_retransmit = 0;
2006 chk->rec.data.rcv_flags = chunk_flags;
2008 chk->send_size = the_len;
2010 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n",
2013 atomic_add_int(&net->ref_count, 1);
2016 /* Set the appropriate TSN mark */
2017 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2018 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2019 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2020 asoc->highest_tsn_inside_nr_map = tsn;
2023 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2024 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2025 asoc->highest_tsn_inside_map = tsn;
2028 /* Now is it complete (i.e. not fragmented)? */
2029 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2031 * Special check for when streams are resetting. We could be
2032 * more smart about this and check the actual stream to see
2033 * if it is not being reset.. that way we would not create a
2034 * HOLB when amongst streams being reset and those not being
2038 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2039 SCTP_TSN_GT(tsn, liste->tsn)) {
2041 * yep its past where we need to reset... go ahead
/* Park the message on pending_reply_queue, TSN-sorted, until the
 * stream reset completes. */
2044 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2046 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2048 struct sctp_queued_to_read *ctlOn, *nctlOn;
2049 unsigned char inserted = 0;
2051 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2052 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2057 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2062 if (inserted == 0) {
2064 * must be put at end, use prevP
2065 * (all setup from loop) to setup
2068 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2071 goto finish_express_del;
2073 if (chunk_flags & SCTP_DATA_UNORDERED) {
2074 /* queue directly into socket buffer */
2075 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2077 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2078 sctp_add_to_readq(stcb->sctp_ep, stcb,
2080 &stcb->sctp_socket->so_rcv, 1,
2081 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2084 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2086 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2094 goto finish_express_del;
2096 /* If we reach here its a reassembly */
2097 need_reasm_check = 1;
2098 SCTPDBG(SCTP_DEBUG_XXX,
2099 "Queue data to stream for reasm control: %p msg_id: %u\n",
2101 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2104 * the assoc is now gone and chk was put onto the reasm
2105 * queue, which has all been freed.
2113 /* Here we tidy up things */
2114 if (tsn == (asoc->cumulative_tsn + 1)) {
2115 /* Update cum-ack */
2116 asoc->cumulative_tsn = tsn;
2122 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2124 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2126 SCTP_STAT_INCR(sctps_recvdata);
2127 /* Set it present please */
2128 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2129 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2132 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2133 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2135 /* check the special flag for stream resets */
2136 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2137 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2139 * we have finished working through the backlogged TSN's now
2140 * time to reset streams. 1: call reset function. 2: free
2141 * pending_reply space 3: distribute any chunks in
2142 * pending_reply_queue.
2144 struct sctp_queued_to_read *ctl, *nctl;
2146 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2147 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2148 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2149 SCTP_FREE(liste, SCTP_M_STRESET);
2150 /* sa_ignore FREED_MEMORY */
2151 liste = TAILQ_FIRST(&asoc->resetHead);
2152 if (TAILQ_EMPTY(&asoc->resetHead)) {
2153 /* All can be removed */
2154 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2155 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2156 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2162 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2163 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2167 * if ctl->sinfo_tsn is <= liste->tsn we can
2168 * process it which is the NOT of
2169 * ctl->sinfo_tsn > liste->tsn
2171 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2172 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2179 * Now service re-assembly to pick up anything that has been
2180 * held on reassembly queue?
2182 (void)sctp_deliver_reasm_check(stcb, asoc, strm);
2183 need_reasm_check = 0;
2185 if (need_reasm_check) {
2186 /* Another one waits ? */
2187 (void)sctp_deliver_reasm_check(stcb, asoc, strm);
/*
 * sctp_map_lookup_tab[i] = number of consecutive 1-bits in byte i starting at
 * the least-significant bit (e.g. tab[0x01] = 1, tab[0x7f] = 7, tab[0xff] = 8,
 * tab[0x02] = 0).  Used when sliding the mapping arrays: for the first byte
 * that is not 0xff it gives how far the cumulative TSN can advance within it.
 */
2192 static const int8_t sctp_map_lookup_tab[256] = {
2193 0, 1, 0, 2, 0, 1, 0, 3,
2194 0, 1, 0, 2, 0, 1, 0, 4,
2195 0, 1, 0, 2, 0, 1, 0, 3,
2196 0, 1, 0, 2, 0, 1, 0, 5,
2197 0, 1, 0, 2, 0, 1, 0, 3,
2198 0, 1, 0, 2, 0, 1, 0, 4,
2199 0, 1, 0, 2, 0, 1, 0, 3,
2200 0, 1, 0, 2, 0, 1, 0, 6,
2201 0, 1, 0, 2, 0, 1, 0, 3,
2202 0, 1, 0, 2, 0, 1, 0, 4,
2203 0, 1, 0, 2, 0, 1, 0, 3,
2204 0, 1, 0, 2, 0, 1, 0, 5,
2205 0, 1, 0, 2, 0, 1, 0, 3,
2206 0, 1, 0, 2, 0, 1, 0, 4,
2207 0, 1, 0, 2, 0, 1, 0, 3,
2208 0, 1, 0, 2, 0, 1, 0, 7,
2209 0, 1, 0, 2, 0, 1, 0, 3,
2210 0, 1, 0, 2, 0, 1, 0, 4,
2211 0, 1, 0, 2, 0, 1, 0, 3,
2212 0, 1, 0, 2, 0, 1, 0, 5,
2213 0, 1, 0, 2, 0, 1, 0, 3,
2214 0, 1, 0, 2, 0, 1, 0, 4,
2215 0, 1, 0, 2, 0, 1, 0, 3,
2216 0, 1, 0, 2, 0, 1, 0, 6,
2217 0, 1, 0, 2, 0, 1, 0, 3,
2218 0, 1, 0, 2, 0, 1, 0, 4,
2219 0, 1, 0, 2, 0, 1, 0, 3,
2220 0, 1, 0, 2, 0, 1, 0, 5,
2221 0, 1, 0, 2, 0, 1, 0, 3,
2222 0, 1, 0, 2, 0, 1, 0, 4,
2223 0, 1, 0, 2, 0, 1, 0, 3,
2224 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Advance ("slide") the receive-side TSN mapping arrays after new data has
 * been accepted.  Recomputes the cumulative TSN by OR-ing nr_mapping_array
 * with mapping_array (a fragmented message may sit in mapping_array only),
 * then either clears the arrays outright (everything acked) or shifts both
 * arrays down by whole bytes and rebases mapping_array_base_tsn.
 *
 * NOTE(review): this extract is missing interleaved source lines (the
 * embedded original line numbers jump), so some braces, else-arms and
 * declarations (e.g. of 'val', 'at', 'clr', 'i', 'ii', 'm') are not
 * visible here.  Comments below describe only what the visible code shows.
 */
2229 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2232 * Now we also need to check the mapping array in a couple of ways.
2233 * 1) Did we move the cum-ack point?
2235 * When you first glance at this you might think that all entries that
2236 * make up the postion of the cum-ack would be in the nr-mapping
2237 * array only.. i.e. things up to the cum-ack are always
2238 * deliverable. Thats true with one exception, when its a fragmented
2239 * message we may not deliver the data until some threshold (or all
2240 * of it) is in place. So we must OR the nr_mapping_array and
2241 * mapping_array to get a true picture of the cum-ack.
2243 struct sctp_association *asoc;
2246 int slide_from, slide_end, lgap, distance;
2247 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for the SCTP_MAP_PREPARE_SLIDE log below. */
2251 old_cumack = asoc->cumulative_tsn;
2252 old_base = asoc->mapping_array_base_tsn;
2253 old_highest = asoc->highest_tsn_inside_map;
2255 * We could probably improve this a small bit by calculating the
2256 * offset of the current cum-ack as the starting point.
/*
 * Scan the combined (nr | renege) map for the first byte that is not all
 * ones; 'at' accumulates the count of leading set bits via the
 * sctp_map_lookup_tab[] table indexed by the combined byte value.
 */
2259 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2260 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2264 /* there is a 0 bit */
2265 at += sctp_map_lookup_tab[val];
/* New cum-ack = base TSN plus the number of consecutive set bits found. */
2269 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: the cum-ack can never exceed both highest-TSN trackers.
 * Visible here: a panic() path and a SCTP_PRINTF() recovery path that
 * clamps both trackers to the cum-ack (build-dependent; the selecting
 * #if/#else lines are among the missing lines of this extract).
 */
2271 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2272 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2274 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2275 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2277 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2278 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2279 sctp_print_mapping_array(asoc);
2280 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2281 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2283 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2284 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the two per-array highest-TSN trackers. */
2287 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2288 highest_tsn = asoc->highest_tsn_inside_nr_map;
2290 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: every received TSN is now cumulatively acked (and we consumed
 * at least one whole byte) -> wipe the used portion of both arrays and
 * rebase so the next expected TSN maps to bit 0.
 */
2292 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2293 /* The complete array was completed by a single FR */
2294 /* highest becomes the cum-ack */
2302 /* clear the array */
2303 clr = ((at + 7) >> 3);
2304 if (clr > asoc->mapping_array_size) {
2305 clr = asoc->mapping_array_size;
2307 memset(asoc->mapping_array, 0, clr);
2308 memset(asoc->nr_mapping_array, 0, clr);
/* Debug sweep: both arrays must really be all-zero after the clear. */
2310 for (i = 0; i < asoc->mapping_array_size; i++) {
2311 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2312 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2313 sctp_print_mapping_array(asoc);
2317 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2318 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Case 2: at least one full byte of leading ones -> slide both arrays
 * down by 'slide_from' bytes instead of clearing them.
 */
2319 } else if (at >= 8) {
2320 /* we can slide the mapping array down */
2321 /* slide_from holds where we hit the first NON 0xff byte */
2324 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn from the base; slide_end = its byte. */
2327 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2328 slide_end = (lgap >> 3);
2329 if (slide_end < slide_from) {
2330 sctp_print_mapping_array(asoc);
2332 panic("impossible slide");
2334 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2335 lgap, slide_end, slide_from, at);
2339 if (slide_end > asoc->mapping_array_size) {
2341 panic("would overrun buffer");
2343 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2344 asoc->mapping_array_size, slide_end);
2345 slide_end = asoc->mapping_array_size;
2348 distance = (slide_end - slide_from) + 1;
2349 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2350 sctp_log_map(old_base, old_cumack, old_highest,
2351 SCTP_MAP_PREPARE_SLIDE);
2352 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2353 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/*
 * If the copy would not fit, skip the slide entirely and wait for more
 * data to arrive (logged as SCTP_MAP_SLIDE_NONE).  The second half of
 * this condition is among the missing lines.
 */
2355 if (distance + slide_from > asoc->mapping_array_size ||
2358 * Here we do NOT slide forward the array so that
2359 * hopefully when more data comes in to fill it up
2360 * we will be able to slide it forward. Really I
2361 * don't think this should happen :-0
2364 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2365 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2366 (uint32_t) asoc->mapping_array_size,
2367 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down to index 0, then zero the vacated tail. */
2372 for (ii = 0; ii < distance; ii++) {
2373 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2374 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2377 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2378 asoc->mapping_array[ii] = 0;
2379 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "empty map" invariant (highest == base - 1) intact across
 * the rebase by advancing the trackers along with the base TSN.
 */
2381 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2382 asoc->highest_tsn_inside_map += (slide_from << 3);
2384 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2385 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2387 asoc->mapping_array_base_tsn += (slide_from << 3);
2388 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2389 sctp_log_map(asoc->mapping_array_base_tsn,
2390 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2391 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to emit a SACK immediately or just arm the delayed-ack
 * timer after processing received DATA.  'was_a_gap' tells us whether a
 * gap existed before this packet was processed.
 *
 * NOTE(review): interleaved source lines are missing from this extract
 * (embedded line numbers jump), so some braces/else-arms and the 'is_a_gap'
 * declaration are not visible.  Comments describe only the visible logic.
 */
2398 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2400 struct sctp_association *asoc;
2401 uint32_t highest_tsn;
/* highest_tsn = max(highest_tsn_inside_nr_map, highest_tsn_inside_map). */
2404 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2405 highest_tsn = asoc->highest_tsn_inside_nr_map;
2407 highest_tsn = asoc->highest_tsn_inside_map;
2411 * Now we need to see if we need to queue a sack or just start the
2412 * timer (if allowed).
/*
 * Special case: in SHUTDOWN-SENT, cancel any pending delayed-ack timer
 * and answer with a SHUTDOWN plus an immediate SACK instead.
 */
2414 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2416 * Ok special case, in SHUTDOWN-SENT case. here we maker
2417 * sure SACK timer is off and instead send a SHUTDOWN and a
2420 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2421 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2422 stcb->sctp_ep, stcb, NULL,
2423 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* SHUTDOWN goes to the alternate net if one is set, else the primary. */
2425 sctp_send_shutdown(stcb,
2426 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2427 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2431 /* is there a gap now ? */
2432 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2435 * CMT DAC algorithm: increase number of packets received
2438 stcb->asoc.cmt_dac_pkts_rcvd++;
/*
 * Send-now triggers: explicit send_sack request, a gap that just closed,
 * duplicate TSNs to report, a gap still open, delayed ack disabled, or
 * the packets-per-SACK limit (sack_freq) reached.
 */
2440 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2442 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2444 (stcb->asoc.numduptsns) || /* we have dup's */
2445 (is_a_gap) || /* is still a gap */
2446 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2447 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * CMT DAC exception: if only gap-related triggers fired, delay the ack
 * anyway by starting the recv (delayed-ack) timer.
 */
2450 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2451 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2452 (stcb->asoc.send_sack == 0) &&
2453 (stcb->asoc.numduptsns == 0) &&
2454 (stcb->asoc.delayed_ack) &&
2455 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2458 * CMT DAC algorithm: With CMT, delay acks
2459 * even in the face of
2461 * reordering. Therefore, if acks that do not
2462 * have to be sent because of the above
2463 * reasons, will be delayed. That is, acks
2464 * that would have been sent due to gap
2465 * reports will be delayed with DAC. Start
2466 * the delayed ack timer.
2468 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2469 stcb->sctp_ep, stcb, NULL);
2472 * Ok we must build a SACK since the timer
2473 * is pending, we got our first packet OR
2474 * there are gaps or duplicates.
/* Stop any armed delayed-ack timer and send the SACK right now. */
2476 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2477 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* No trigger fired: just make sure the delayed-ack timer is running. */
2480 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2481 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2482 stcb->sctp_ep, stcb, NULL);
/*
 * Walk every chunk in the DATA region of an inbound packet, hand DATA and
 * I-DATA chunks to sctp_process_a_data_chunk(), abort on protocol
 * violations (wrong chunk flavor for the negotiated I-DATA support, runt
 * chunks), queue unrecognized-chunk error reports per the chunk-type bit
 * rules, and finally trigger SACK generation via sctp_sack_check().
 *
 * NOTE(review): interleaved source lines are missing from this extract
 * (embedded line numbers jump), so several declarations ('m', 'to',
 * 'from', 'clen', 'stop_proc', returns/breaks) and closing braces are
 * not visible.  Comments describe only the visible logic.
 */
2489 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2490 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2491 struct sctp_nets *net, uint32_t * high_tsn)
2493 struct sctp_chunkhdr *ch, chunk_buf;
2494 struct sctp_association *asoc;
2495 int num_chunks = 0; /* number of control chunks processed */
2497 int chk_length, break_flag, last_chunk;
2498 int abort_flag = 0, was_a_gap;
2500 uint32_t highest_tsn;
2503 sctp_set_rwnd(stcb, &stcb->asoc);
2506 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap existed BEFORE this packet, for sctp_sack_check(). */
2508 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2509 highest_tsn = asoc->highest_tsn_inside_nr_map;
2511 highest_tsn = asoc->highest_tsn_inside_map;
2513 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2515 * setup where we got the last DATA packet from for any SACK that
2516 * may need to go out. Don't bump the net. This is done ONLY when a
2517 * chunk is assigned.
2519 asoc->last_data_chunk_from = net;
2522 * Now before we proceed we must figure out if this is a wasted
2523 * cluster... i.e. it is a small packet sent in and yet the driver
2524 * underneath allocated a full cluster for it. If so we must copy it
2525 * to a smaller mbuf and free up the cluster mbuf. This will help
2526 * with cluster starvation. Note for __Panda__ we don't do this
2527 * since it has clusters all the way down to 64 bytes.
2529 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2530 /* we only handle mbufs that are singletons.. not chains */
2531 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2533 /* ok lets see if we can copy the data up */
2536 /* get the pointers and copy */
2537 to = mtod(m, caddr_t *);
2538 from = mtod((*mm), caddr_t *);
2539 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2540 /* copy the length and free up the old */
2541 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2543 /* sucess, back copy */
2546 /* We are in trouble in the mbuf world .. yikes */
2550 /* get pointer to the first chunk header */
2551 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2552 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2557 * process all DATA chunks...
2559 *high_tsn = asoc->cumulative_tsn;
2561 asoc->data_pkts_seen++;
/* Main chunk-walk loop; 'stop_proc' (declared in a missing line) ends it. */
2562 while (stop_proc == 0) {
2563 /* validate chunk length */
2564 chk_length = ntohs(ch->chunk_length);
2565 if (length - *offset < chk_length) {
2566 /* all done, mutulated chunk */
/* DATA received although I-DATA was negotiated -> protocol violation abort. */
2570 if ((asoc->idata_supported == 1) &&
2571 (ch->chunk_type == SCTP_DATA)) {
2572 struct mbuf *op_err;
2573 char msg[SCTP_DIAG_INFO_LEN];
2575 snprintf(msg, sizeof(msg), "I-DATA chunk received when DATA was negotiated");
2576 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2577 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2578 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* I-DATA received although it was not negotiated -> same abort path. */
2581 if ((asoc->idata_supported == 0) &&
2582 (ch->chunk_type == SCTP_IDATA)) {
2583 struct mbuf *op_err;
2584 char msg[SCTP_DIAG_INFO_LEN];
2586 snprintf(msg, sizeof(msg), "DATA chunk received when I-DATA was negotiated");
2587 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2588 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2589 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Accepted data flavor: pick the minimum header size for this variant. */
2592 if ((ch->chunk_type == SCTP_DATA) ||
2593 (ch->chunk_type == SCTP_IDATA)) {
2596 if (ch->chunk_type == SCTP_DATA) {
2597 clen = sizeof(struct sctp_data_chunk);
2599 clen = sizeof(struct sctp_idata_chunk);
/* Runt chunk (shorter than its own fixed header) -> abort. */
2601 if ((size_t)chk_length < clen) {
2603 * Need to send an abort since we had a
2604 * invalid data chunk.
2606 struct mbuf *op_err;
2607 char msg[SCTP_DIAG_INFO_LEN];
2609 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2611 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2612 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2613 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2616 #ifdef SCTP_AUDITING_ENABLED
2617 sctp_audit_log(0xB1, 0);
/* last_chunk test: does this (padded) chunk end exactly at packet end? */
2619 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2624 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2625 chk_length, net, high_tsn, &abort_flag, &break_flag,
2626 last_chunk, ch->chunk_type)) {
2634 * Set because of out of rwnd space and no
2635 * drop rep space left.
2641 /* not a data chunk in the data region */
2642 switch (ch->chunk_type) {
2643 case SCTP_INITIATION:
2644 case SCTP_INITIATION_ACK:
2645 case SCTP_SELECTIVE_ACK:
2646 case SCTP_NR_SELECTIVE_ACK:
2647 case SCTP_HEARTBEAT_REQUEST:
2648 case SCTP_HEARTBEAT_ACK:
2649 case SCTP_ABORT_ASSOCIATION:
2651 case SCTP_SHUTDOWN_ACK:
2652 case SCTP_OPERATION_ERROR:
2653 case SCTP_COOKIE_ECHO:
2654 case SCTP_COOKIE_ACK:
2657 case SCTP_SHUTDOWN_COMPLETE:
2658 case SCTP_AUTHENTICATION:
2659 case SCTP_ASCONF_ACK:
2660 case SCTP_PACKET_DROPPED:
2661 case SCTP_STREAM_RESET:
2662 case SCTP_FORWARD_CUM_TSN:
2665 * Now, what do we do with KNOWN chunks that
2666 * are NOT in the right place?
2668 * For now, I do nothing but ignore them. We
2669 * may later want to add sysctl stuff to
2670 * switch out and do either an ABORT() or
2671 * possibly process them.
/* sysctl-controlled strict mode: abort on control-after-DATA ordering. */
2673 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2674 struct mbuf *op_err;
2675 char msg[SCTP_DIAG_INFO_LEN];
2677 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2679 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2680 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2685 /* unknown chunk type, use bit rules */
/* Upper-type-bit 0x40: report the unrecognized chunk in an ERROR chunk. */
2686 if (ch->chunk_type & 0x40) {
2687 /* Add a error report to the queue */
2688 struct mbuf *op_err;
2689 struct sctp_gen_error_cause *cause;
2691 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2692 0, M_NOWAIT, 1, MT_DATA);
2693 if (op_err != NULL) {
2694 cause = mtod(op_err, struct sctp_gen_error_cause *);
2695 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2696 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2697 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
/* Append a copy of the offending chunk; drop the report if the copy fails. */
2698 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2699 if (SCTP_BUF_NEXT(op_err) != NULL) {
2700 sctp_queue_op_err(stcb, op_err);
2702 sctp_m_freem(op_err);
/* Upper-type-bit 0x80 clear: stop processing the rest of this packet. */
2706 if ((ch->chunk_type & 0x80) == 0) {
2707 /* discard the rest of this packet */
2709 } /* else skip this bad chunk and
2712 } /* switch of chunk type */
/* Advance past this chunk (including 4-byte padding) and fetch the next. */
2714 *offset += SCTP_SIZE32(chk_length);
2715 if ((*offset >= length) || stop_proc) {
2716 /* no more data left in the mbuf chain */
2720 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2721 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2730 * we need to report rwnd overrun drops.
2732 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2736 * Did we get data, if so update the time for auto-close and
2737 * give peer credit for being alive.
2739 SCTP_STAT_INCR(sctps_recvpktwithdata);
2740 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2741 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2742 stcb->asoc.overall_error_count,
2744 SCTP_FROM_SCTP_INDATA,
/* Fresh data from the peer clears the error count and stamps liveness. */
2747 stcb->asoc.overall_error_count = 0;
2748 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2750 /* now service all of the reassm queue if needed */
2751 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2752 /* Assure that we ack right away */
2753 stcb->asoc.send_sack = 1;
2755 /* Start a sack timer or QUEUE a SACK for sending */
2756 sctp_sack_check(stcb, was_a_gap);
/*
 * Process one SACK gap-ack fragment [frag_strt, frag_end] (offsets relative
 * to last_tsn) against the sent queue: mark covered chunks, update flight
 * size, RTO samples, CMT (r)tx pseudo-cumack trackers, and — for nr-sacks —
 * release chunk data.  *p_tp1 carries the queue cursor between fragments.
 * Returns 'wake_him' (nonzero when nr-sacked data was freed).
 *
 * NOTE(review): interleaved source lines are missing from this extract
 * (embedded line numbers jump), so some braces, #if/#else selectors and
 * statements are not visible.  Comments describe only the visible logic.
 */
2761 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2762 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2764 uint32_t * biggest_newly_acked_tsn,
2765 uint32_t * this_sack_lowest_newack,
2768 struct sctp_tmit_chunk *tp1;
2769 unsigned int theTSN;
2770 int j, wake_him = 0, circled = 0;
2772 /* Recover the tp1 we last saw */
2775 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* One iteration per TSN offset inside the fragment range. */
2777 for (j = frag_strt; j <= frag_end; j++) {
2778 theTSN = j + last_tsn;
2780 if (tp1->rec.data.doing_fast_retransmit)
2784 * CMT: CUCv2 algorithm. For each TSN being
2785 * processed from the sent queue, track the
2786 * next expected pseudo-cumack, or
2787 * rtx_pseudo_cumack, if required. Separate
2788 * cumack trackers for first transmissions,
2789 * and retransmissions.
2791 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2792 (tp1->whoTo->find_pseudo_cumack == 1) &&
2793 (tp1->snd_count == 1)) {
2794 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2795 tp1->whoTo->find_pseudo_cumack = 0;
2797 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2798 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2799 (tp1->snd_count > 1)) {
2800 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2801 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the sent-queue entry for this gap-acked TSN. */
2803 if (tp1->rec.data.TSN_seq == theTSN) {
2804 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2806 * must be held until
/* First time this chunk is seen acked (below RESEND): leave flight. */
2809 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2811 * If it is less than RESEND, it is
2812 * now no-longer in flight.
2813 * Higher values may already be set
2814 * via previous Gap Ack Blocks...
2815 * i.e. ACKED or RESEND.
2817 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2818 *biggest_newly_acked_tsn)) {
2819 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2822 * CMT: SFR algo (and HTNA) - set
2823 * saw_newack to 1 for dest being
2824 * newly acked. update
2825 * this_sack_highest_newack if
2828 if (tp1->rec.data.chunk_was_revoked == 0)
2829 tp1->whoTo->saw_newack = 1;
2831 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2832 tp1->whoTo->this_sack_highest_newack)) {
2833 tp1->whoTo->this_sack_highest_newack =
2834 tp1->rec.data.TSN_seq;
2837 * CMT DAC algo: also update
2838 * this_sack_lowest_newack
2840 if (*this_sack_lowest_newack == 0) {
2841 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2842 sctp_log_sack(*this_sack_lowest_newack,
2844 tp1->rec.data.TSN_seq,
2847 SCTP_LOG_TSN_ACKED);
2849 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2852 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2853 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2854 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2855 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2856 * Separate pseudo_cumack trackers for first transmissions and
2859 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2860 if (tp1->rec.data.chunk_was_revoked == 0) {
2861 tp1->whoTo->new_pseudo_cumack = 1;
2863 tp1->whoTo->find_pseudo_cumack = 1;
2865 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2866 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2868 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2869 if (tp1->rec.data.chunk_was_revoked == 0) {
2870 tp1->whoTo->new_pseudo_cumack = 1;
2872 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2874 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2875 sctp_log_sack(*biggest_newly_acked_tsn,
2877 tp1->rec.data.TSN_seq,
2880 SCTP_LOG_TSN_ACKED);
2882 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2883 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2884 tp1->whoTo->flight_size,
2886 (uint32_t) (uintptr_t) tp1->whoTo,
2887 tp1->rec.data.TSN_seq);
/* Remove the chunk from per-net and total flight; credit net_ack. */
2889 sctp_flight_size_decrease(tp1);
2890 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2891 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2894 sctp_total_flight_decrease(stcb, tp1);
2896 tp1->whoTo->net_ack += tp1->send_size;
2897 if (tp1->snd_count < 2) {
2899 * True non-retransmited chunk
2901 tp1->whoTo->net_ack2 += tp1->send_size;
/* Karn-style RTT sample from this chunk's send timestamp. */
2909 sctp_calculate_rto(stcb,
2912 &tp1->sent_rcv_time,
2913 sctp_align_safe_nocopy,
2914 SCTP_RTT_FROM_DATA);
2917 if (tp1->whoTo->rto_needed == 0) {
2918 tp1->whoTo->rto_needed = 1;
2924 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2925 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2926 stcb->asoc.this_sack_highest_gap)) {
2927 stcb->asoc.this_sack_highest_gap =
2928 tp1->rec.data.TSN_seq;
2930 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2931 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2932 #ifdef SCTP_AUDITING_ENABLED
2933 sctp_audit_log(0xB2,
2934 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2939 * All chunks NOT UNSENT fall through here and are marked
2940 * (leave PR-SCTP ones that are to skip alone though)
2942 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2943 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2944 tp1->sent = SCTP_DATAGRAM_MARKED;
2946 if (tp1->rec.data.chunk_was_revoked) {
2947 /* deflate the cwnd */
2948 tp1->whoTo->cwnd -= tp1->book_size;
2949 tp1->rec.data.chunk_was_revoked = 0;
2951 /* NR Sack code here */
2953 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* nr-sacked: drop per-stream queue count and maybe trigger stream reset. */
2954 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2955 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2958 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2961 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2962 (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2963 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2964 stcb->asoc.trigger_reset = 1;
2966 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* Release the nr-acked chunk's data (it will never be retransmitted). */
2972 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2973 sctp_m_freem(tp1->data);
2980 } /* if (tp1->TSN_seq == theTSN) */
/* Cursor passed theTSN — stop scanning for this fragment offset. */
2981 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2984 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* Wrap to queue head once, in case fragments arrived out of order. */
2985 if ((tp1 == NULL) && (circled == 0)) {
2987 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2989 } /* end while (tp1) */
2992 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2994 /* In case the fragments were not in order we must reset */
2995 } /* end for (j = fragStart */
2997 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over all gap-ack blocks (regular then nr) carried in a SACK,
 * validating each (start <= end, in-order vs. previous block) and handing
 * the range to sctp_process_segment_range().  Returns nonzero when any
 * chunk data was freed (nr-sack), via 'chunk_freed'.
 *
 * NOTE(review): interleaved source lines are missing from this extract
 * (embedded line numbers jump), so the declarations of 'i', 'num_frs',
 * 'chunk_freed', 'non_revocable' and some braces are not visible.
 */
3002 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3003 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3004 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3005 int num_seg, int num_nr_seg, int *rto_ok)
3007 struct sctp_gap_ack_block *frag, block;
3008 struct sctp_tmit_chunk *tp1;
3013 uint16_t frag_strt, frag_end, prev_frag_end;
3015 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* First num_seg blocks are revocable gap-acks, the rest are nr gap-acks. */
3019 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3022 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next 4-byte gap-ack block out of the mbuf chain. */
3024 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3025 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3026 *offset += sizeof(block);
3028 return (chunk_freed);
3030 frag_strt = ntohs(frag->start);
3031 frag_end = ntohs(frag->end);
3033 if (frag_strt > frag_end) {
3034 /* This gap report is malformed, skip it. */
3037 if (frag_strt <= prev_frag_end) {
3038 /* This gap report is not in order, so restart. */
3039 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Track the highest TSN acked by any block in this SACK. */
3041 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3042 *biggest_tsn_acked = last_tsn + frag_end;
3049 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3050 non_revocable, &num_frs, biggest_newly_acked_tsn,
3051 this_sack_lowest_newack, rto_ok)) {
3054 prev_frag_end = frag_end;
3056 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3058 sctp_log_fr(*biggest_tsn_acked,
3059 *biggest_newly_acked_tsn,
3060 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3062 return (chunk_freed);
/*
 * Scan the sent queue for chunks above the new cum-ack that were ACKED by
 * a previous SACK but are no longer covered (TSN above biggest_tsn_acked):
 * such chunks have been "revoked" by the peer and must re-enter flight.
 * Chunks MARKED by this same SACK are promoted to ACKED.
 *
 * NOTE(review): interleaved source lines are missing from this extract
 * (embedded line numbers jump); some braces/break statements are not
 * visible.  Comments describe only the visible logic.
 */
3066 sctp_check_for_revoked(struct sctp_tcb *stcb,
3067 struct sctp_association *asoc, uint32_t cumack,
3068 uint32_t biggest_tsn_acked)
3070 struct sctp_tmit_chunk *tp1;
3072 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3073 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3075 * ok this guy is either ACK or MARKED. If it is
3076 * ACKED it has been previously acked but not this
3077 * time i.e. revoked. If it is MARKED it was ACK'ed
3080 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3083 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3084 /* it has been revoked */
/* Return the chunk to SENT state and flag it for cwnd deflation later. */
3085 tp1->sent = SCTP_DATAGRAM_SENT;
3086 tp1->rec.data.chunk_was_revoked = 1;
3088 * We must add this stuff back in to assure
3089 * timers and such get started.
3091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3092 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3093 tp1->whoTo->flight_size,
3095 (uint32_t) (uintptr_t) tp1->whoTo,
3096 tp1->rec.data.TSN_seq);
/* Put the revoked chunk back into per-net and total flight accounting. */
3098 sctp_flight_size_increase(tp1);
3099 sctp_total_flight_increase(stcb, tp1);
3101 * We inflate the cwnd to compensate for our
3102 * artificial inflation of the flight_size.
3104 tp1->whoTo->cwnd += tp1->book_size;
3105 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3106 sctp_log_sack(asoc->last_acked_seq,
3108 tp1->rec.data.TSN_seq,
3111 SCTP_LOG_TSN_REVOKED);
3113 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3114 /* it has been re-acked in this SACK */
3115 tp1->sent = SCTP_DATAGRAM_ACKED;
3118 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3125 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3126 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3128 struct sctp_tmit_chunk *tp1;
3129 int strike_flag = 0;
3131 int tot_retrans = 0;
3132 uint32_t sending_seq;
3133 struct sctp_nets *net;
3134 int num_dests_sacked = 0;
3137 * select the sending_seq, this is either the next thing ready to be
3138 * sent but not transmitted, OR, the next seq we assign.
3140 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3142 sending_seq = asoc->sending_seq;
3144 sending_seq = tp1->rec.data.TSN_seq;
3147 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3148 if ((asoc->sctp_cmt_on_off > 0) &&
3149 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3150 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3151 if (net->saw_newack)
3155 if (stcb->asoc.prsctp_supported) {
3156 (void)SCTP_GETTIME_TIMEVAL(&now);
3158 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3160 if (tp1->no_fr_allowed) {
3161 /* this one had a timeout or something */
3164 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3165 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3166 sctp_log_fr(biggest_tsn_newly_acked,
3167 tp1->rec.data.TSN_seq,
3169 SCTP_FR_LOG_CHECK_STRIKE);
3171 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3172 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3176 if (stcb->asoc.prsctp_supported) {
3177 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3178 /* Is it expired? */
3179 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3180 /* Yes so drop it */
3181 if (tp1->data != NULL) {
3182 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3183 SCTP_SO_NOT_LOCKED);
3189 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3190 /* we are beyond the tsn in the sack */
3193 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3194 /* either a RESEND, ACKED, or MARKED */
3196 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3197 /* Continue strikin FWD-TSN chunks */
3198 tp1->rec.data.fwd_tsn_cnt++;
3203 * CMT : SFR algo (covers part of DAC and HTNA as well)
3205 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3207 * No new acks were receieved for data sent to this
3208 * dest. Therefore, according to the SFR algo for
3209 * CMT, no data sent to this dest can be marked for
3210 * FR using this SACK.
3213 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3214 tp1->whoTo->this_sack_highest_newack)) {
3216 * CMT: New acks were receieved for data sent to
3217 * this dest. But no new acks were seen for data
3218 * sent after tp1. Therefore, according to the SFR
3219 * algo for CMT, tp1 cannot be marked for FR using
3220 * this SACK. This step covers part of the DAC algo
3221 * and the HTNA algo as well.
3226 * Here we check to see if we were have already done a FR
3227 * and if so we see if the biggest TSN we saw in the sack is
3228 * smaller than the recovery point. If so we don't strike
3229 * the tsn... otherwise we CAN strike the TSN.
3232 * @@@ JRI: Check for CMT if (accum_moved &&
3233 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3236 if (accum_moved && asoc->fast_retran_loss_recovery) {
3238 * Strike the TSN if in fast-recovery and cum-ack
3241 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3242 sctp_log_fr(biggest_tsn_newly_acked,
3243 tp1->rec.data.TSN_seq,
3245 SCTP_FR_LOG_STRIKE_CHUNK);
3247 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3250 if ((asoc->sctp_cmt_on_off > 0) &&
3251 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3253 * CMT DAC algorithm: If SACK flag is set to
3254 * 0, then lowest_newack test will not pass
3255 * because it would have been set to the
3256 * cumack earlier. If not already to be
3257 * rtx'd, If not a mixed sack and if tp1 is
3258 * not between two sacked TSNs, then mark by
3259 * one more. NOTE that we are marking by one
3260 * additional time since the SACK DAC flag
3261 * indicates that two packets have been
3262 * received after this missing TSN.
3264 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3265 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3266 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3267 sctp_log_fr(16 + num_dests_sacked,
3268 tp1->rec.data.TSN_seq,
3270 SCTP_FR_LOG_STRIKE_CHUNK);
3275 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3276 (asoc->sctp_cmt_on_off == 0)) {
3278 * For those that have done a FR we must take
3279 * special consideration if we strike. I.e the
3280 * biggest_newly_acked must be higher than the
3281 * sending_seq at the time we did the FR.
3284 #ifdef SCTP_FR_TO_ALTERNATE
3286 * If FR's go to new networks, then we must only do
3287 * this for singly homed asoc's. However if the FR's
3288 * go to the same network (Armando's work) then its
3289 * ok to FR multiple times.
3297 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3298 tp1->rec.data.fast_retran_tsn)) {
3300 * Strike the TSN, since this ack is
3301 * beyond where things were when we
3304 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3305 sctp_log_fr(biggest_tsn_newly_acked,
3306 tp1->rec.data.TSN_seq,
3308 SCTP_FR_LOG_STRIKE_CHUNK);
3310 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3314 if ((asoc->sctp_cmt_on_off > 0) &&
3315 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3317 * CMT DAC algorithm: If
3318 * SACK flag is set to 0,
3319 * then lowest_newack test
3320 * will not pass because it
3321 * would have been set to
3322 * the cumack earlier. If
3323 * not already to be rtx'd,
3324 * If not a mixed sack and
3325 * if tp1 is not between two
3326 * sacked TSNs, then mark by
3327 * one more. NOTE that we
3328 * are marking by one
3329 * additional time since the
3330 * SACK DAC flag indicates
3331 * that two packets have
3332 * been received after this
3335 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3336 (num_dests_sacked == 1) &&
3337 SCTP_TSN_GT(this_sack_lowest_newack,
3338 tp1->rec.data.TSN_seq)) {
3339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3340 sctp_log_fr(32 + num_dests_sacked,
3341 tp1->rec.data.TSN_seq,
3343 SCTP_FR_LOG_STRIKE_CHUNK);
3345 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3353 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3356 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3357 biggest_tsn_newly_acked)) {
3359 * We don't strike these: This is the HTNA
3360 * algorithm i.e. we don't strike If our TSN is
3361 * larger than the Highest TSN Newly Acked.
3365 /* Strike the TSN */
3366 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3367 sctp_log_fr(biggest_tsn_newly_acked,
3368 tp1->rec.data.TSN_seq,
3370 SCTP_FR_LOG_STRIKE_CHUNK);
3372 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3375 if ((asoc->sctp_cmt_on_off > 0) &&
3376 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3378 * CMT DAC algorithm: If SACK flag is set to
3379 * 0, then lowest_newack test will not pass
3380 * because it would have been set to the
3381 * cumack earlier. If not already to be
3382 * rtx'd, If not a mixed sack and if tp1 is
3383 * not between two sacked TSNs, then mark by
3384 * one more. NOTE that we are marking by one
3385 * additional time since the SACK DAC flag
3386 * indicates that two packets have been
3387 * received after this missing TSN.
3389 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3390 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3392 sctp_log_fr(48 + num_dests_sacked,
3393 tp1->rec.data.TSN_seq,
3395 SCTP_FR_LOG_STRIKE_CHUNK);
3401 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3402 struct sctp_nets *alt;
3404 /* fix counts and things */
3405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3406 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3407 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3409 (uint32_t) (uintptr_t) tp1->whoTo,
3410 tp1->rec.data.TSN_seq);
3413 tp1->whoTo->net_ack++;
3414 sctp_flight_size_decrease(tp1);
3415 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3416 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3421 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3422 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3424 /* add back to the rwnd */
3425 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3427 /* remove from the total flight */
3428 sctp_total_flight_decrease(stcb, tp1);
3430 if ((stcb->asoc.prsctp_supported) &&
3431 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3433 * Has it been retransmitted tv_sec times? -
3434 * we store the retran count there.
3436 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3437 /* Yes, so drop it */
3438 if (tp1->data != NULL) {
3439 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3440 SCTP_SO_NOT_LOCKED);
3442 /* Make sure to flag we had a FR */
3443 tp1->whoTo->net_ack++;
3448 * SCTP_PRINTF("OK, we are now ready to FR this
3451 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3452 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3456 /* This is a subsequent FR */
3457 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3459 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3460 if (asoc->sctp_cmt_on_off > 0) {
3462 * CMT: Using RTX_SSTHRESH policy for CMT.
3463 * If CMT is being used, then pick dest with
3464 * largest ssthresh for any retransmission.
3466 tp1->no_fr_allowed = 1;
3468 /* sa_ignore NO_NULL_CHK */
3469 if (asoc->sctp_cmt_pf > 0) {
3471 * JRS 5/18/07 - If CMT PF is on,
3472 * use the PF version of
3475 alt = sctp_find_alternate_net(stcb, alt, 2);
3478 * JRS 5/18/07 - If only CMT is on,
3479 * use the CMT version of
3482 /* sa_ignore NO_NULL_CHK */
3483 alt = sctp_find_alternate_net(stcb, alt, 1);
3489 * CUCv2: If a different dest is picked for
3490 * the retransmission, then new
3491 * (rtx-)pseudo_cumack needs to be tracked
3492 * for orig dest. Let CUCv2 track new (rtx-)
3493 * pseudo-cumack always.
3496 tp1->whoTo->find_pseudo_cumack = 1;
3497 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3499 } else {/* CMT is OFF */
3501 #ifdef SCTP_FR_TO_ALTERNATE
3502 /* Can we find an alternate? */
3503 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3506 * default behavior is to NOT retransmit
3507 * FR's to an alternate. Armando Caro's
3508 * paper details why.
3514 tp1->rec.data.doing_fast_retransmit = 1;
3516 /* mark the sending seq for possible subsequent FR's */
3518 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3519 * (uint32_t)tpi->rec.data.TSN_seq);
3521 if (TAILQ_EMPTY(&asoc->send_queue)) {
3523 * If the queue of send is empty then its
3524 * the next sequence number that will be
3525 * assigned so we subtract one from this to
3526 * get the one we last sent.
3528 tp1->rec.data.fast_retran_tsn = sending_seq;
3531 * If there are chunks on the send queue
3532 * (unsent data that has made it from the
3533 * stream queues but not out the door, we
3534 * take the first one (which will have the
3535 * lowest TSN) and subtract one to get the
3538 struct sctp_tmit_chunk *ttt;
3540 ttt = TAILQ_FIRST(&asoc->send_queue);
3541 tp1->rec.data.fast_retran_tsn =
3542 ttt->rec.data.TSN_seq;
3547 * this guy had a RTO calculation pending on
3550 if ((tp1->whoTo != NULL) &&
3551 (tp1->whoTo->rto_needed == 0)) {
3552 tp1->whoTo->rto_needed = 1;
3556 if (alt != tp1->whoTo) {
3557 /* yes, there is an alternate. */
3558 sctp_free_remote_addr(tp1->whoTo);
3559 /* sa_ignore FREED_MEMORY */
3561 atomic_add_int(&alt->ref_count, 1);
/*
 * NOTE(review): this region appears to be a partial extraction of FreeBSD's
 * netinet/sctp_indata.c -- many original lines (braces, returns, comment
 * delimiters) are elided, so the text below is not compilable as-is.
 *
 * sctp_try_advance_peer_ack_point():
 * Walk the sent_queue in TSN order and try to advance
 * asoc->advanced_peer_ack_point (the PR-SCTP "Advanced.Peer.Ack.Point",
 * RFC 3758) over chunks that are marked SCTP_FORWARD_TSN_SKIP or
 * SCTP_DATAGRAM_NR_ACKED.  The walk stops at the first chunk that cannot
 * be skipped.  Presumably returns the last chunk the ack point advanced
 * to (a_adv) so the caller can arm a timer / send a FORWARD-TSN --
 * TODO confirm against the full source; the return statements are elided here.
 */
3567 struct sctp_tmit_chunk *
3568 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3569 struct sctp_association *asoc)
3571 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3575 if (asoc->prsctp_supported == 0) {
3578 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3579 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3580 tp1->sent != SCTP_DATAGRAM_RESEND &&
3581 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3582 /* no chance to advance, out of here */
3585 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3586 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3587 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3588 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3589 asoc->advanced_peer_ack_point,
3590 tp1->rec.data.TSN_seq, 0, 0);
3593 if (!PR_SCTP_ENABLED(tp1->flags)) {
3595 * We can't fwd-tsn past any that are reliable aka
3596 * retransmitted until the asoc fails.
/* Snapshot "now" once for the TTL comparisons below. */
3601 (void)SCTP_GETTIME_TIMEVAL(&now);
3605 * now we got a chunk which is marked for another
3606 * retransmission to a PR-stream but has run out its chances
3607 * already maybe OR has been marked to skip now. Can we skip
3608 * it if its a resend?
3610 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3611 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3613 * Now is this one marked for resend and its time is
3616 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3617 /* Yes so drop it */
3619 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3620 1, SCTP_SO_NOT_LOCKED);
3624 * No, we are done when hit one for resend
3625 * whos time as not expired.
3631 * Ok now if this chunk is marked to drop it we can clean up
3632 * the chunk, advance our peer ack point and we can check
3635 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3636 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3637 /* advance PeerAckPoint goes forward */
3638 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3639 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3641 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3642 /* No update but we do save the chk */
3647 * If it is still in RESEND we can advance no
/*
 * sctp_fs_audit():
 * Debug audit of the flight-size accounting.  Classifies every chunk on
 * the sent_queue by its ->sent state (in flight, marked for resend,
 * in-between, acked, above-acked), compares against the association's
 * total_flight / total_flight_count taken at entry, and complains
 * (SCTP_PRINTF, or panic() under the elided INVARIANTS-style build) when
 * the counters disagree.  The return value and several lines (return
 * type, braces, counter increments) are elided from this extraction --
 * NOTE(review): callers appear to treat a non-zero return as "accounting
 * was wrong"; confirm against the full source.
 */
3657 sctp_fs_audit(struct sctp_association *asoc)
3659 struct sctp_tmit_chunk *chk;
3660 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3664 int entry_flight, entry_cnt;
/* Snapshot the counters we are auditing against. */
3670 entry_flight = asoc->total_flight;
3671 entry_cnt = asoc->total_flight_count;
/* If everything on the sent queue is PR-SCTP, skip the audit. */
3673 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3676 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3677 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3678 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3679 chk->rec.data.TSN_seq,
3683 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3685 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3687 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3694 if ((inflight > 0) || (inbetween > 0)) {
3696 panic("Flight size-express incorrect? \n");
3698 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3699 entry_flight, entry_cnt);
3701 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3702 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery():
 * Recover a chunk that was sent as a zero-window probe once the peer's
 * rwnd has reopened.  Clears tp1->window_probe; if the chunk is already
 * (NR-)acked or its data mbuf is gone it only logs and leaves the flight
 * counters alone ("TSN's skipped we do NOT move back").  Otherwise it
 * notifies the CC module, shrinks the per-net and total flight sizes,
 * and re-marks the chunk SCTP_DATAGRAM_RESEND so it is queued for
 * retransmission (bumping sent_queue_retran_cnt).  Early returns/braces
 * are elided from this extraction.
 */
3711 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3712 struct sctp_association *asoc,
3713 struct sctp_tmit_chunk *tp1)
3715 tp1->window_probe = 0;
3716 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3717 /* TSN's skipped we do NOT move back. */
3718 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3719 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3721 (uint32_t) (uintptr_t) tp1->whoTo,
3722 tp1->rec.data.TSN_seq);
3725 /* First setup this by shrinking flight */
3726 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3727 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3730 sctp_flight_size_decrease(tp1);
3731 sctp_total_flight_decrease(stcb, tp1);
3732 /* Now mark for resend */
3733 tp1->sent = SCTP_DATAGRAM_RESEND;
3734 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3736 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3737 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3738 tp1->whoTo->flight_size,
3740 (uint32_t) (uintptr_t) tp1->whoTo,
3741 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack():
 * Fast-path SACK handler for the common case of a SACK that carries only
 * a cumulative ack (no gap-ack blocks).  Rough flow, as visible in this
 * (heavily sampled) extraction:
 *   1) optional logging; reject an old cumack, or treat a duplicate
 *      cumack as a pure window update;
 *   2) per-net CC prep (drag cwr_window_tsn, reset CUC/fast-recovery
 *      state, call the CC module's prepare hook);
 *   3) strict-sacks sanity check: abort the association if the peer acks
 *      a TSN we never sent;
 *   4) free every sent_queue chunk covered by the new cumack, updating
 *      flight sizes, net_ack/net_ack2, RTO samples, CUCv2 pseudo-cumack
 *      flags, stream chunk counts and stream-reset triggers;
 *   5) wake the sending socket (with the __APPLE__/SO_LOCK_TESTING
 *      lock dance) if anything was freed;
 *   6) run the CC module's after-sack hook, recompute peers_rwnd,
 *      recover window-probe chunks, and restart/stop T3 timers per net;
 *   7) shutdown handling when both queues drain (SHUTDOWN or
 *      SHUTDOWN-ACK, guard timers, possible user-initiated abort);
 *   8) PR-SCTP: advance the peer ack point and send FORWARD-TSN if it
 *      moved.
 * NOTE(review): declarations of old_rwnd, send_s, to_ticks, so and many
 * braces/else lines are on lines elided from this extraction; the text
 * below is not compilable as-is.  Return type (void) is also elided.
 */
3746 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3747 uint32_t rwnd, int *abort_now, int ecne_seen)
3749 struct sctp_nets *net;
3750 struct sctp_association *asoc;
3751 struct sctp_tmit_chunk *tp1, *tp2;
3753 int win_probe_recovery = 0;
3754 int win_probe_recovered = 0;
3755 int j, done_once = 0;
3758 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3759 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3760 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3762 SCTP_TCB_LOCK_ASSERT(stcb);
3763 #ifdef SCTP_ASOCLOG_OF_TSNS
3764 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3765 stcb->asoc.cumack_log_at++;
3766 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3767 stcb->asoc.cumack_log_at = 0;
/* Old (behind last_acked_seq) cumacks are discarded; equal means window update. */
3771 old_rwnd = asoc->peers_rwnd;
3772 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3775 } else if (asoc->last_acked_seq == cumack) {
3776 /* Window update sack */
3777 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3778 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3779 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3780 /* SWS sender side engages */
3781 asoc->peers_rwnd = 0;
3783 if (asoc->peers_rwnd > old_rwnd) {
3788 /* First setup for CC stuff */
3789 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3790 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3791 /* Drag along the window_tsn for cwr's */
3792 net->cwr_window_tsn = cumack;
3794 net->prev_cwnd = net->cwnd;
3799 * CMT: Reset CUC and Fast recovery algo variables before
3802 net->new_pseudo_cumack = 0;
3803 net->will_exit_fast_recovery = 0;
3804 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3805 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Strict-sacks: a cumack at/above our next sending_seq is a protocol violation. */
3808 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3811 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3812 tp1 = TAILQ_LAST(&asoc->sent_queue,
3813 sctpchunk_listhead);
3814 send_s = tp1->rec.data.TSN_seq + 1;
3816 send_s = asoc->sending_seq;
3818 if (SCTP_TSN_GE(cumack, send_s)) {
3819 struct mbuf *op_err;
3820 char msg[SCTP_DIAG_INFO_LEN];
3824 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3826 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3827 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3828 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3832 asoc->this_sack_highest_gap = cumack;
3833 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3834 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3835 stcb->asoc.overall_error_count,
3837 SCTP_FROM_SCTP_INDATA,
/* Any valid SACK clears the association error count. */
3840 stcb->asoc.overall_error_count = 0;
3841 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3842 /* process the new consecutive TSN first */
3843 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3844 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3845 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3846 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3848 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3850 * If it is less than ACKED, it is
3851 * now no-longer in flight. Higher
3852 * values may occur during marking
3854 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3855 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3856 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3857 tp1->whoTo->flight_size,
3859 (uint32_t) (uintptr_t) tp1->whoTo,
3860 tp1->rec.data.TSN_seq);
3862 sctp_flight_size_decrease(tp1);
3863 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3864 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3867 /* sa_ignore NO_NULL_CHK */
3868 sctp_total_flight_decrease(stcb, tp1);
3870 tp1->whoTo->net_ack += tp1->send_size;
3871 if (tp1->snd_count < 2) {
3873 * True non-retransmited
3876 tp1->whoTo->net_ack2 +=
3879 /* update RTO too? */
3888 sctp_calculate_rto(stcb,
3890 &tp1->sent_rcv_time,
3891 sctp_align_safe_nocopy,
3892 SCTP_RTT_FROM_DATA);
3895 if (tp1->whoTo->rto_needed == 0) {
3896 tp1->whoTo->rto_needed = 1;
3902 * CMT: CUCv2 algorithm. From the
3903 * cumack'd TSNs, for each TSN being
3904 * acked for the first time, set the
3905 * following variables for the
3906 * corresp destination.
3907 * new_pseudo_cumack will trigger a
3909 * find_(rtx_)pseudo_cumack will
3910 * trigger search for the next
3911 * expected (rtx-)pseudo-cumack.
3913 tp1->whoTo->new_pseudo_cumack = 1;
3914 tp1->whoTo->find_pseudo_cumack = 1;
3915 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3918 /* sa_ignore NO_NULL_CHK */
3919 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3922 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3923 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3925 if (tp1->rec.data.chunk_was_revoked) {
3926 /* deflate the cwnd */
3927 tp1->whoTo->cwnd -= tp1->book_size;
3928 tp1->rec.data.chunk_was_revoked = 0;
3930 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3931 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3932 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3935 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* A drained stream with a pending reset triggers the deferred stream reset. */
3939 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3940 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3941 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3942 asoc->trigger_reset = 1;
3944 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3946 /* sa_ignore NO_NULL_CHK */
3947 sctp_free_bufspace(stcb, asoc, tp1, 1);
3948 sctp_m_freem(tp1->data);
3951 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3952 sctp_log_sack(asoc->last_acked_seq,
3954 tp1->rec.data.TSN_seq,
3957 SCTP_LOG_FREE_SENT);
3959 asoc->sent_queue_cnt--;
3960 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Wake the sender if we freed send-buffer space (wake_him set on elided lines). */
3967 /* sa_ignore NO_NULL_CHK */
3968 if (stcb->sctp_socket) {
3969 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3973 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3974 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3975 /* sa_ignore NO_NULL_CHK */
3976 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3978 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3979 so = SCTP_INP_SO(stcb->sctp_ep);
3980 atomic_add_int(&stcb->asoc.refcnt, 1);
3981 SCTP_TCB_UNLOCK(stcb);
3982 SCTP_SOCKET_LOCK(so, 1);
3983 SCTP_TCB_LOCK(stcb);
3984 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3985 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3986 /* assoc was freed while we were unlocked */
3987 SCTP_SOCKET_UNLOCK(so, 1);
3991 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3992 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3993 SCTP_SOCKET_UNLOCK(so, 1);
3996 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3997 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4001 /* JRS - Use the congestion control given in the CC module */
4002 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4003 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4004 if (net->net_ack2 > 0) {
4006 * Karn's rule applies to clearing error
4007 * count, this is optional.
4009 net->error_count = 0;
4010 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4011 /* addr came good */
4012 net->dest_state |= SCTP_ADDR_REACHABLE;
4013 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4014 0, (void *)net, SCTP_SO_NOT_LOCKED);
4016 if (net == stcb->asoc.primary_destination) {
4017 if (stcb->asoc.alternate) {
4019 * release the alternate,
4022 sctp_free_remote_addr(stcb->asoc.alternate);
4023 stcb->asoc.alternate = NULL;
4026 if (net->dest_state & SCTP_ADDR_PF) {
4027 net->dest_state &= ~SCTP_ADDR_PF;
4028 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4029 stcb->sctp_ep, stcb, net,
4030 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4031 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4032 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4033 /* Done with this net */
4036 /* restore any doubled timers */
4037 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4038 if (net->RTO < stcb->asoc.minrto) {
4039 net->RTO = stcb->asoc.minrto;
4041 if (net->RTO > stcb->asoc.maxrto) {
4042 net->RTO = stcb->asoc.maxrto;
4046 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4048 asoc->last_acked_seq = cumack;
4050 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4051 /* nothing left in-flight */
4052 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4053 net->flight_size = 0;
4054 net->partial_bytes_acked = 0;
4056 asoc->total_flight = 0;
4057 asoc->total_flight_count = 0;
/* Recompute the peer's usable rwnd from the advertised rwnd minus flight. */
4060 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4061 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4062 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4063 /* SWS sender side engages */
4064 asoc->peers_rwnd = 0;
4066 if (asoc->peers_rwnd > old_rwnd) {
4067 win_probe_recovery = 1;
4069 /* Now assure a timer where data is queued at */
4072 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4075 if (win_probe_recovery && (net->window_probe)) {
4076 win_probe_recovered = 1;
4078 * Find first chunk that was used with window probe
4079 * and clear the sent
4081 /* sa_ignore FREED_MEMORY */
4082 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4083 if (tp1->window_probe) {
4084 /* move back to data send queue */
4085 sctp_window_probe_recovery(stcb, asoc, tp1);
4090 if (net->RTO == 0) {
4091 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4093 to_ticks = MSEC_TO_TICKS(net->RTO);
4095 if (net->flight_size) {
4097 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4098 sctp_timeout_handler, &net->rxt_timer);
4099 if (net->window_probe) {
4100 net->window_probe = 0;
4103 if (net->window_probe) {
4105 * In window probes we must assure a timer
4106 * is still running there
4108 net->window_probe = 0;
4109 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4110 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4111 sctp_timeout_handler, &net->rxt_timer);
4113 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4114 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4116 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* NOTE(review): the `if (...)` header for this flight-size re-audit
 * (involving total_flight and done_once) is on elided lines. */
4121 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4122 (asoc->sent_queue_retran_cnt == 0) &&
4123 (win_probe_recovered == 0) &&
4126 * huh, this should not happen unless all packets are
4127 * PR-SCTP and marked to skip of course.
4129 if (sctp_fs_audit(asoc)) {
4130 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4131 net->flight_size = 0;
4133 asoc->total_flight = 0;
4134 asoc->total_flight_count = 0;
4135 asoc->sent_queue_retran_cnt = 0;
4136 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4137 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4138 sctp_flight_size_increase(tp1);
4139 sctp_total_flight_increase(stcb, tp1);
4140 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4141 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4148 /**********************************/
4149 /* Now what about shutdown issues */
4150 /**********************************/
4151 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4152 /* nothing left on sendqueue.. consider done */
4154 if ((asoc->stream_queue_cnt == 1) &&
4155 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4156 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4157 (asoc->locked_on_sending)
4159 struct sctp_stream_queue_pending *sp;
4162 * I may be in a state where we got all across.. but
4163 * cannot write more due to a shutdown... we abort
4164 * since the user did not indicate EOR in this case.
4165 * The sp will be cleaned during free of the asoc.
4167 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4169 if ((sp) && (sp->length == 0)) {
4170 /* Let cleanup code purge it */
4171 if (sp->msg_is_complete) {
4172 asoc->stream_queue_cnt--;
4174 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4175 asoc->locked_on_sending = NULL;
4176 asoc->stream_queue_cnt--;
4180 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4181 (asoc->stream_queue_cnt == 0)) {
4182 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4183 /* Need to abort here */
4184 struct mbuf *op_err;
4189 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4190 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4191 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4194 struct sctp_nets *netp;
4196 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4197 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4198 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4200 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4201 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4202 sctp_stop_timers_for_shutdown(stcb);
4203 if (asoc->alternate) {
4204 netp = asoc->alternate;
4206 netp = asoc->primary_destination;
4208 sctp_send_shutdown(stcb, netp);
4209 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4210 stcb->sctp_ep, stcb, netp);
4211 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4212 stcb->sctp_ep, stcb, netp);
4214 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4215 (asoc->stream_queue_cnt == 0)) {
4216 struct sctp_nets *netp;
4218 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4221 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4222 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4223 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4224 sctp_stop_timers_for_shutdown(stcb);
4225 if (asoc->alternate) {
4226 netp = asoc->alternate;
4228 netp = asoc->primary_destination;
4230 sctp_send_shutdown_ack(stcb, netp);
4231 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4232 stcb->sctp_ep, stcb, netp);
4235 /*********************************************/
4236 /* Here we perform PR-SCTP procedures */
4238 /*********************************************/
4239 /* C1. update advancedPeerAckPoint */
4240 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4241 asoc->advanced_peer_ack_point = cumack;
4243 /* PR-Sctp issues need to be addressed too */
4244 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4245 struct sctp_tmit_chunk *lchk;
4246 uint32_t old_adv_peer_ack_point;
4248 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4249 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4250 /* C3. See if we need to send a Fwd-TSN */
4251 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4253 * ISSUE with ECN, see FWD-TSN processing.
4255 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4256 send_forward_tsn(stcb, asoc);
4258 /* try to FR fwd-tsn's that get lost too */
4259 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4260 send_forward_tsn(stcb, asoc);
4265 /* Assure a timer is up */
4266 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4267 stcb->sctp_ep, stcb, lchk->whoTo);
4270 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4271 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4273 stcb->asoc.peers_rwnd,
4274 stcb->asoc.total_flight,
4275 stcb->asoc.total_output_queue_size);
4280 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4281 struct sctp_tcb *stcb,
4282 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4283 int *abort_now, uint8_t flags,
4284 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4286 struct sctp_association *asoc;
4287 struct sctp_tmit_chunk *tp1, *tp2;
4288 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4289 uint16_t wake_him = 0;
4290 uint32_t send_s = 0;
4292 int accum_moved = 0;
4293 int will_exit_fast_recovery = 0;
4294 uint32_t a_rwnd, old_rwnd;
4295 int win_probe_recovery = 0;
4296 int win_probe_recovered = 0;
4297 struct sctp_nets *net = NULL;
4300 uint8_t reneged_all = 0;
4301 uint8_t cmt_dac_flag;
4304 * we take any chance we can to service our queues since we cannot
4305 * get awoken when the socket is read from :<
4308 * Now perform the actual SACK handling: 1) Verify that it is not an
4309 * old sack, if so discard. 2) If there is nothing left in the send
4310 * queue (cum-ack is equal to last acked) then you have a duplicate
4311 * too, update any rwnd change and verify no timers are running.
4312 * then return. 3) Process any new consequtive data i.e. cum-ack
4313 * moved process these first and note that it moved. 4) Process any
4314 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4315 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4316 * sync up flightsizes and things, stop all timers and also check
4317 * for shutdown_pending state. If so then go ahead and send off the
4318 * shutdown. If in shutdown recv, send off the shutdown-ack and
4319 * start that timer, Ret. 9) Strike any non-acked things and do FR
4320 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4321 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4322 * if in shutdown_recv state.
4324 SCTP_TCB_LOCK_ASSERT(stcb);
4326 this_sack_lowest_newack = 0;
4327 SCTP_STAT_INCR(sctps_slowpath_sack);
4329 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4330 #ifdef SCTP_ASOCLOG_OF_TSNS
4331 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4332 stcb->asoc.cumack_log_at++;
4333 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4334 stcb->asoc.cumack_log_at = 0;
4339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4340 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4341 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4343 old_rwnd = stcb->asoc.peers_rwnd;
4344 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4345 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4346 stcb->asoc.overall_error_count,
4348 SCTP_FROM_SCTP_INDATA,
4351 stcb->asoc.overall_error_count = 0;
4353 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4354 sctp_log_sack(asoc->last_acked_seq,
4361 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4363 uint32_t *dupdata, dblock;
4365 for (i = 0; i < num_dup; i++) {
4366 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4367 sizeof(uint32_t), (uint8_t *) & dblock);
4368 if (dupdata == NULL) {
4371 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4374 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4376 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4377 tp1 = TAILQ_LAST(&asoc->sent_queue,
4378 sctpchunk_listhead);
4379 send_s = tp1->rec.data.TSN_seq + 1;
4382 send_s = asoc->sending_seq;
4384 if (SCTP_TSN_GE(cum_ack, send_s)) {
4385 struct mbuf *op_err;
4386 char msg[SCTP_DIAG_INFO_LEN];
4389 * no way, we have not even sent this TSN out yet.
4390 * Peer is hopelessly messed up with us.
4392 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4395 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4396 tp1->rec.data.TSN_seq, (void *)tp1);
4401 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4403 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4404 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4405 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4409 /**********************/
4410 /* 1) check the range */
4411 /**********************/
4412 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4413 /* acking something behind */
4416 /* update the Rwnd of the peer */
4417 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4418 TAILQ_EMPTY(&asoc->send_queue) &&
4419 (asoc->stream_queue_cnt == 0)) {
4420 /* nothing left on send/sent and strmq */
4421 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4422 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4423 asoc->peers_rwnd, 0, 0, a_rwnd);
4425 asoc->peers_rwnd = a_rwnd;
4426 if (asoc->sent_queue_retran_cnt) {
4427 asoc->sent_queue_retran_cnt = 0;
4429 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4430 /* SWS sender side engages */
4431 asoc->peers_rwnd = 0;
4433 /* stop any timers */
4434 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4435 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4436 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4437 net->partial_bytes_acked = 0;
4438 net->flight_size = 0;
4440 asoc->total_flight = 0;
4441 asoc->total_flight_count = 0;
4445 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4446 * things. The total byte count acked is tracked in netAckSz AND
4447 * netAck2 is used to track the total bytes acked that are un-
4448 * amibguious and were never retransmitted. We track these on a per
4449 * destination address basis.
4451 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4452 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4453 /* Drag along the window_tsn for cwr's */
4454 net->cwr_window_tsn = cum_ack;
4456 net->prev_cwnd = net->cwnd;
4461 * CMT: Reset CUC and Fast recovery algo variables before
4464 net->new_pseudo_cumack = 0;
4465 net->will_exit_fast_recovery = 0;
4466 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4467 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4470 /* process the new consecutive TSN first */
4471 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4472 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4473 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4475 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4477 * If it is less than ACKED, it is
4478 * now no-longer in flight. Higher
4479 * values may occur during marking
4481 if ((tp1->whoTo->dest_state &
4482 SCTP_ADDR_UNCONFIRMED) &&
4483 (tp1->snd_count < 2)) {
4485 * If there was no retran
4486 * and the address is
4487 * un-confirmed and we sent
4489 * sacked.. its confirmed,
4492 tp1->whoTo->dest_state &=
4493 ~SCTP_ADDR_UNCONFIRMED;
4495 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4496 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4497 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4498 tp1->whoTo->flight_size,
4500 (uint32_t) (uintptr_t) tp1->whoTo,
4501 tp1->rec.data.TSN_seq);
4503 sctp_flight_size_decrease(tp1);
4504 sctp_total_flight_decrease(stcb, tp1);
4505 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4506 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4510 tp1->whoTo->net_ack += tp1->send_size;
4512 /* CMT SFR and DAC algos */
4513 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4514 tp1->whoTo->saw_newack = 1;
4516 if (tp1->snd_count < 2) {
4518 * True non-retransmited
4521 tp1->whoTo->net_ack2 +=
4524 /* update RTO too? */
4528 sctp_calculate_rto(stcb,
4530 &tp1->sent_rcv_time,
4531 sctp_align_safe_nocopy,
4532 SCTP_RTT_FROM_DATA);
4535 if (tp1->whoTo->rto_needed == 0) {
4536 tp1->whoTo->rto_needed = 1;
4542 * CMT: CUCv2 algorithm. From the
4543 * cumack'd TSNs, for each TSN being
4544 * acked for the first time, set the
4545 * following variables for the
4546 * corresp destination.
4547 * new_pseudo_cumack will trigger a
4549 * find_(rtx_)pseudo_cumack will
4550 * trigger search for the next
4551 * expected (rtx-)pseudo-cumack.
4553 tp1->whoTo->new_pseudo_cumack = 1;
4554 tp1->whoTo->find_pseudo_cumack = 1;
4555 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4558 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4559 sctp_log_sack(asoc->last_acked_seq,
4561 tp1->rec.data.TSN_seq,
4564 SCTP_LOG_TSN_ACKED);
4566 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4567 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4570 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4571 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4572 #ifdef SCTP_AUDITING_ENABLED
4573 sctp_audit_log(0xB3,
4574 (asoc->sent_queue_retran_cnt & 0x000000ff));
4577 if (tp1->rec.data.chunk_was_revoked) {
4578 /* deflate the cwnd */
4579 tp1->whoTo->cwnd -= tp1->book_size;
4580 tp1->rec.data.chunk_was_revoked = 0;
4582 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4583 tp1->sent = SCTP_DATAGRAM_ACKED;
4590 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4591 /* always set this up to cum-ack */
4592 asoc->this_sack_highest_gap = last_tsn;
4594 if ((num_seg > 0) || (num_nr_seg > 0)) {
4597 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4598 * to be greater than the cumack. Also reset saw_newack to 0
4601 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4602 net->saw_newack = 0;
4603 net->this_sack_highest_newack = last_tsn;
4607 * thisSackHighestGap will increase while handling NEW
4608 * segments this_sack_highest_newack will increase while
4609 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4610 * used for CMT DAC algo. saw_newack will also change.
4612 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4613 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4614 num_seg, num_nr_seg, &rto_ok)) {
4617 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4619 * validate the biggest_tsn_acked in the gap acks if
4620 * strict adherence is wanted.
4622 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4624 * peer is either confused or we are under
4625 * attack. We must abort.
4627 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4628 biggest_tsn_acked, send_s);
4633 /*******************************************/
4634 /* cancel ALL T3-send timer if accum moved */
4635 /*******************************************/
4636 if (asoc->sctp_cmt_on_off > 0) {
4637 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4638 if (net->new_pseudo_cumack)
4639 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4641 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4646 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4647 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4648 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4652 /********************************************/
4653 /* drop the acked chunks from the sentqueue */
4654 /********************************************/
4655 asoc->last_acked_seq = cum_ack;
4657 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4658 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4661 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4662 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4663 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4666 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4670 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4671 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4672 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4673 asoc->trigger_reset = 1;
4675 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4676 if (PR_SCTP_ENABLED(tp1->flags)) {
4677 if (asoc->pr_sctp_cnt != 0)
4678 asoc->pr_sctp_cnt--;
4680 asoc->sent_queue_cnt--;
4682 /* sa_ignore NO_NULL_CHK */
4683 sctp_free_bufspace(stcb, asoc, tp1, 1);
4684 sctp_m_freem(tp1->data);
4686 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4687 asoc->sent_queue_cnt_removeable--;
4690 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4691 sctp_log_sack(asoc->last_acked_seq,
4693 tp1->rec.data.TSN_seq,
4696 SCTP_LOG_FREE_SENT);
4698 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4701 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4703 panic("Warning flight size is postive and should be 0");
4705 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4706 asoc->total_flight);
4708 asoc->total_flight = 0;
4710 /* sa_ignore NO_NULL_CHK */
4711 if ((wake_him) && (stcb->sctp_socket)) {
4712 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4716 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4718 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4721 so = SCTP_INP_SO(stcb->sctp_ep);
4722 atomic_add_int(&stcb->asoc.refcnt, 1);
4723 SCTP_TCB_UNLOCK(stcb);
4724 SCTP_SOCKET_LOCK(so, 1);
4725 SCTP_TCB_LOCK(stcb);
4726 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4727 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4728 /* assoc was freed while we were unlocked */
4729 SCTP_SOCKET_UNLOCK(so, 1);
4733 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4734 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4735 SCTP_SOCKET_UNLOCK(so, 1);
4738 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4739 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4743 if (asoc->fast_retran_loss_recovery && accum_moved) {
4744 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4745 /* Setup so we will exit RFC2582 fast recovery */
4746 will_exit_fast_recovery = 1;
4750 * Check for revoked fragments:
4752 * if Previous sack - Had no frags then we can't have any revoked if
4753 * Previous sack - Had frag's then - If we now have frags aka
4754 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4755 * some of them. else - The peer revoked all ACKED fragments, since
4756 * we had some before and now we have NONE.
4760 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4761 asoc->saw_sack_with_frags = 1;
4762 } else if (asoc->saw_sack_with_frags) {
4763 int cnt_revoked = 0;
4765 /* Peer revoked all dg's marked or acked */
4766 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4767 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4768 tp1->sent = SCTP_DATAGRAM_SENT;
4769 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4770 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4771 tp1->whoTo->flight_size,
4773 (uint32_t) (uintptr_t) tp1->whoTo,
4774 tp1->rec.data.TSN_seq);
4776 sctp_flight_size_increase(tp1);
4777 sctp_total_flight_increase(stcb, tp1);
4778 tp1->rec.data.chunk_was_revoked = 1;
4780 * To ensure that this increase in
4781 * flightsize, which is artificial, does not
4782 * throttle the sender, we also increase the
4783 * cwnd artificially.
4785 tp1->whoTo->cwnd += tp1->book_size;
4792 asoc->saw_sack_with_frags = 0;
4795 asoc->saw_sack_with_nr_frags = 1;
4797 asoc->saw_sack_with_nr_frags = 0;
4799 /* JRS - Use the congestion control given in the CC module */
4800 if (ecne_seen == 0) {
4801 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4802 if (net->net_ack2 > 0) {
4804 * Karn's rule applies to clearing error
4805 * count, this is optional.
4807 net->error_count = 0;
4808 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4809 /* addr came good */
4810 net->dest_state |= SCTP_ADDR_REACHABLE;
4811 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4812 0, (void *)net, SCTP_SO_NOT_LOCKED);
4814 if (net == stcb->asoc.primary_destination) {
4815 if (stcb->asoc.alternate) {
4817 * release the alternate,
4820 sctp_free_remote_addr(stcb->asoc.alternate);
4821 stcb->asoc.alternate = NULL;
4824 if (net->dest_state & SCTP_ADDR_PF) {
4825 net->dest_state &= ~SCTP_ADDR_PF;
4826 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4827 stcb->sctp_ep, stcb, net,
4828 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4829 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4830 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4831 /* Done with this net */
4834 /* restore any doubled timers */
4835 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4836 if (net->RTO < stcb->asoc.minrto) {
4837 net->RTO = stcb->asoc.minrto;
4839 if (net->RTO > stcb->asoc.maxrto) {
4840 net->RTO = stcb->asoc.maxrto;
4844 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4846 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4847 /* nothing left in-flight */
4848 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4849 /* stop all timers */
4850 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4852 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4853 net->flight_size = 0;
4854 net->partial_bytes_acked = 0;
4856 asoc->total_flight = 0;
4857 asoc->total_flight_count = 0;
4859 /**********************************/
4860 /* Now what about shutdown issues */
4861 /**********************************/
4862 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4863 /* nothing left on sendqueue.. consider done */
4864 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4865 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4866 asoc->peers_rwnd, 0, 0, a_rwnd);
4868 asoc->peers_rwnd = a_rwnd;
4869 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4870 /* SWS sender side engages */
4871 asoc->peers_rwnd = 0;
4874 if ((asoc->stream_queue_cnt == 1) &&
4875 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4876 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4877 (asoc->locked_on_sending)
4879 struct sctp_stream_queue_pending *sp;
4882 * I may be in a state where we got all across.. but
4883 * cannot write more due to a shutdown... we abort
4884 * since the user did not indicate EOR in this case.
4886 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4888 if ((sp) && (sp->length == 0)) {
4889 asoc->locked_on_sending = NULL;
4890 if (sp->msg_is_complete) {
4891 asoc->stream_queue_cnt--;
4893 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4894 asoc->stream_queue_cnt--;
4898 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4899 (asoc->stream_queue_cnt == 0)) {
4900 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4901 /* Need to abort here */
4902 struct mbuf *op_err;
4907 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4908 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4909 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4912 struct sctp_nets *netp;
4914 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4915 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4916 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4918 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4919 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4920 sctp_stop_timers_for_shutdown(stcb);
4921 if (asoc->alternate) {
4922 netp = asoc->alternate;
4924 netp = asoc->primary_destination;
4926 sctp_send_shutdown(stcb, netp);
4927 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4928 stcb->sctp_ep, stcb, netp);
4929 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4930 stcb->sctp_ep, stcb, netp);
4933 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4934 (asoc->stream_queue_cnt == 0)) {
4935 struct sctp_nets *netp;
4937 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4940 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4941 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4942 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4943 sctp_stop_timers_for_shutdown(stcb);
4944 if (asoc->alternate) {
4945 netp = asoc->alternate;
4947 netp = asoc->primary_destination;
4949 sctp_send_shutdown_ack(stcb, netp);
4950 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4951 stcb->sctp_ep, stcb, netp);
4956 * Now here we are going to recycle net_ack for a different use...
4959 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4964 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4965 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4966 * automatically ensure that.
4968 if ((asoc->sctp_cmt_on_off > 0) &&
4969 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4970 (cmt_dac_flag == 0)) {
4971 this_sack_lowest_newack = cum_ack;
4973 if ((num_seg > 0) || (num_nr_seg > 0)) {
4974 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4975 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4977 /* JRS - Use the congestion control given in the CC module */
4978 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4980 /* Now are we exiting loss recovery ? */
4981 if (will_exit_fast_recovery) {
4982 /* Ok, we must exit fast recovery */
4983 asoc->fast_retran_loss_recovery = 0;
4985 if ((asoc->sat_t3_loss_recovery) &&
4986 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4987 /* end satellite t3 loss recovery */
4988 asoc->sat_t3_loss_recovery = 0;
4993 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4994 if (net->will_exit_fast_recovery) {
4995 /* Ok, we must exit fast recovery */
4996 net->fast_retran_loss_recovery = 0;
5000 /* Adjust and set the new rwnd value */
5001 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5002 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5003 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5005 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5006 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5007 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5008 /* SWS sender side engages */
5009 asoc->peers_rwnd = 0;
5011 if (asoc->peers_rwnd > old_rwnd) {
5012 win_probe_recovery = 1;
5015 * Now we must setup so we have a timer up for anyone with
5021 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5022 if (win_probe_recovery && (net->window_probe)) {
5023 win_probe_recovered = 1;
5025 * Find first chunk that was used with
5026 * window probe and clear the event. Put
5027 * it back into the send queue as if has
5030 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5031 if (tp1->window_probe) {
5032 sctp_window_probe_recovery(stcb, asoc, tp1);
5037 if (net->flight_size) {
5039 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5040 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5041 stcb->sctp_ep, stcb, net);
5043 if (net->window_probe) {
5044 net->window_probe = 0;
5047 if (net->window_probe) {
5049 * In window probes we must assure a timer
5050 * is still running there
5052 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5053 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5054 stcb->sctp_ep, stcb, net);
5057 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5058 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5060 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5065 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5066 (asoc->sent_queue_retran_cnt == 0) &&
5067 (win_probe_recovered == 0) &&
5070 * huh, this should not happen unless all packets are
5071 * PR-SCTP and marked to skip of course.
5073 if (sctp_fs_audit(asoc)) {
5074 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5075 net->flight_size = 0;
5077 asoc->total_flight = 0;
5078 asoc->total_flight_count = 0;
5079 asoc->sent_queue_retran_cnt = 0;
5080 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5081 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5082 sctp_flight_size_increase(tp1);
5083 sctp_total_flight_increase(stcb, tp1);
5084 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5085 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5092 /*********************************************/
5093 /* Here we perform PR-SCTP procedures */
5095 /*********************************************/
5096 /* C1. update advancedPeerAckPoint */
5097 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5098 asoc->advanced_peer_ack_point = cum_ack;
5100 /* C2. try to further move advancedPeerAckPoint ahead */
5101 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5102 struct sctp_tmit_chunk *lchk;
5103 uint32_t old_adv_peer_ack_point;
5105 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5106 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5107 /* C3. See if we need to send a Fwd-TSN */
5108 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5110 * ISSUE with ECN, see FWD-TSN processing.
5112 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5113 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5114 0xee, cum_ack, asoc->advanced_peer_ack_point,
5115 old_adv_peer_ack_point);
5117 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5118 send_forward_tsn(stcb, asoc);
5120 /* try to FR fwd-tsn's that get lost too */
5121 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5122 send_forward_tsn(stcb, asoc);
5127 /* Assure a timer is up */
5128 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5129 stcb->sctp_ep, stcb, lchk->whoTo);
5132 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5133 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5135 stcb->asoc.peers_rwnd,
5136 stcb->asoc.total_flight,
5137 stcb->asoc.total_output_queue_size);
5142 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5145 uint32_t cum_ack, a_rwnd;
5147 cum_ack = ntohl(cp->cumulative_tsn_ack);
5148 /* Arrange so a_rwnd does NOT change */
5149 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5151 /* Now call the express sack handling */
5152 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5156 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5157 struct sctp_stream_in *strmin)
5159 struct sctp_queued_to_read *ctl, *nctl;
5160 struct sctp_association *asoc;
5162 int need_reasm_check = 0, old;
5165 tt = strmin->last_sequence_delivered;
5166 if (asoc->idata_supported) {
5172 * First deliver anything prior to and including the stream no that
5175 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5176 if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5177 /* this is deliverable now */
5178 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5179 if (ctl->on_strm_q) {
5180 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5181 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5182 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5183 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5186 panic("strmin: %p ctl: %p unknown %d",
5187 strmin, ctl, ctl->on_strm_q);
5192 /* subtract pending on streams */
5193 asoc->size_on_all_streams -= ctl->length;
5194 sctp_ucount_decr(asoc->cnt_on_all_streams);
5195 /* deliver it to at least the delivery-q */
5196 if (stcb->sctp_socket) {
5197 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5198 sctp_add_to_readq(stcb->sctp_ep, stcb,
5200 &stcb->sctp_socket->so_rcv,
5201 1, SCTP_READ_LOCK_HELD,
5202 SCTP_SO_NOT_LOCKED);
5205 /* Its a fragmented message */
5206 if (ctl->first_frag_seen) {
5208 * Make it so this is next to
5209 * deliver, we restore later
5211 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5212 need_reasm_check = 1;
5217 /* no more delivery now. */
5221 if (need_reasm_check) {
5224 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5225 if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5226 /* Restore the next to deliver unless we are ahead */
5227 strmin->last_sequence_delivered = tt;
5230 /* Left the front Partial one on */
5233 need_reasm_check = 0;
5236 * now we must deliver things in queue the normal way if any are
5239 tt = strmin->last_sequence_delivered + 1;
5240 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5241 if (tt == ctl->sinfo_ssn) {
5242 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5243 /* this is deliverable now */
5244 if (ctl->on_strm_q) {
5245 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5246 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5247 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5248 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5251 panic("strmin: %p ctl: %p unknown %d",
5252 strmin, ctl, ctl->on_strm_q);
5257 /* subtract pending on streams */
5258 asoc->size_on_all_streams -= ctl->length;
5259 sctp_ucount_decr(asoc->cnt_on_all_streams);
5260 /* deliver it to at least the delivery-q */
5261 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5262 if (stcb->sctp_socket) {
5263 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5264 sctp_add_to_readq(stcb->sctp_ep, stcb,
5266 &stcb->sctp_socket->so_rcv, 1,
5267 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5270 tt = strmin->last_sequence_delivered + 1;
5272 /* Its a fragmented message */
5273 if (ctl->first_frag_seen) {
5275 * Make it so this is next to
5278 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5279 need_reasm_check = 1;
5287 if (need_reasm_check) {
5288 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5293 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5294 struct sctp_association *asoc,
5295 uint16_t stream, uint32_t seq)
5297 struct sctp_queued_to_read *control;
5298 struct sctp_stream_in *strm;
5299 struct sctp_tmit_chunk *chk, *nchk;
5302 * For now large messages held on the stream reasm that are complete
5303 * will be tossed too. We could in theory do more work to spin
5304 * through and stop after dumping one msg aka seeing the start of a
5305 * new msg at the head, and call the delivery function... to see if
5306 * it can be delivered... But for now we just dump everything on the
5309 strm = &asoc->strmin[stream];
5310 control = find_reasm_entry(strm, (uint32_t) seq, 0, 0);
5311 if (control == NULL) {
5315 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5316 /* Purge hanging chunks */
5317 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5318 asoc->size_on_reasm_queue -= chk->send_size;
5319 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5321 sctp_m_freem(chk->data);
5324 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5326 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5327 if (control->on_read_q == 0) {
5328 sctp_free_remote_addr(control->whoFrom);
5329 if (control->data) {
5330 sctp_m_freem(control->data);
5331 control->data = NULL;
5333 sctp_free_a_readq(stcb, control);
5339 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5340 struct sctp_forward_tsn_chunk *fwd,
5341 int *abort_flag, struct mbuf *m, int offset)
5343 /* The pr-sctp fwd tsn */
5345 * here we will perform all the data receiver side steps for
5346 * processing FwdTSN, as required in by pr-sctp draft:
5348 * Assume we get FwdTSN(x):
5350 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5351 * others we have 3) examine and update re-ordering queue on
5352 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5353 * report where we are.
5355 struct sctp_association *asoc;
5356 uint32_t new_cum_tsn, gap;
5357 unsigned int i, fwd_sz, m_size;
5359 struct sctp_stream_in *strm;
5360 struct sctp_queued_to_read *ctl, *sv;
5363 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5364 SCTPDBG(SCTP_DEBUG_INDATA1,
5365 "Bad size too small/big fwd-tsn\n");
5368 m_size = (stcb->asoc.mapping_array_size << 3);
5369 /*************************************************************/
5370 /* 1. Here we update local cumTSN and shift the bitmap array */
5371 /*************************************************************/
5372 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5374 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5375 /* Already got there ... */
5379 * now we know the new TSN is more advanced, let's find the actual
5382 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5383 asoc->cumulative_tsn = new_cum_tsn;
5384 if (gap >= m_size) {
5385 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5386 struct mbuf *op_err;
5387 char msg[SCTP_DIAG_INFO_LEN];
5390 * out of range (of single byte chunks in the rwnd I
5391 * give out). This must be an attacker.
5394 snprintf(msg, sizeof(msg),
5395 "New cum ack %8.8x too high, highest TSN %8.8x",
5396 new_cum_tsn, asoc->highest_tsn_inside_map);
5397 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5398 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5399 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5402 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5404 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5405 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5406 asoc->highest_tsn_inside_map = new_cum_tsn;
5408 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5409 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5411 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5412 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5415 SCTP_TCB_LOCK_ASSERT(stcb);
5416 for (i = 0; i <= gap; i++) {
5417 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5418 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5419 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5420 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5421 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5426 /*************************************************************/
5427 /* 2. Clear up re-assembly queue */
5428 /*************************************************************/
5430 /* This is now done as part of clearing up the stream/seq */
5432 /*******************************************************/
5433 /* 3. Update the PR-stream re-ordering queues and fix */
5434 /* delivery issues as needed. */
5435 /*******************************************************/
5436 fwd_sz -= sizeof(*fwd);
5439 unsigned int num_str;
5443 struct sctp_strseq *stseq, strseqbuf;
5444 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5446 offset += sizeof(*fwd);
5448 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5449 if (asoc->idata_supported) {
5450 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5453 num_str = fwd_sz / sizeof(struct sctp_strseq);
5456 for (i = 0; i < num_str; i++) {
5457 if (asoc->idata_supported) {
5458 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5459 sizeof(struct sctp_strseq_mid),
5460 (uint8_t *) & strseqbuf_m);
5461 offset += sizeof(struct sctp_strseq_mid);
5462 if (stseq_m == NULL) {
5465 stream = ntohs(stseq_m->stream);
5466 sequence = ntohl(stseq_m->msg_id);
5468 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5469 sizeof(struct sctp_strseq),
5470 (uint8_t *) & strseqbuf);
5471 offset += sizeof(struct sctp_strseq);
5472 if (stseq == NULL) {
5475 stream = ntohs(stseq->stream);
5476 sequence = (uint32_t) ntohs(stseq->sequence);
5483 * Ok we now look for the stream/seq on the read
5484 * queue where its not all delivered. If we find it
5485 * we transmute the read entry into a PDI_ABORTED.
5487 if (stream >= asoc->streamincnt) {
5488 /* screwed up streams, stop! */
5491 if ((asoc->str_of_pdapi == stream) &&
5492 (asoc->ssn_of_pdapi == sequence)) {
5494 * If this is the one we were partially
5495 * delivering now then we no longer are.
5496 * Note this will change with the reassembly
5499 asoc->fragmented_delivery_inprogress = 0;
5501 strm = &asoc->strmin[stream];
5502 sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence);
5503 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5504 if ((ctl->sinfo_stream == stream) &&
5505 (ctl->sinfo_ssn == sequence)) {
5506 str_seq = (stream << 16) | (0x0000ffff & sequence);
5507 ctl->pdapi_aborted = 1;
5508 sv = stcb->asoc.control_pdapi;
5510 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5511 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5512 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5513 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5515 } else if (ctl->on_strm_q) {
5516 panic("strm: %p ctl: %p unknown %d",
5517 strm, ctl, ctl->on_strm_q);
5521 stcb->asoc.control_pdapi = ctl;
5522 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5524 SCTP_PARTIAL_DELIVERY_ABORTED,
5526 SCTP_SO_NOT_LOCKED);
5527 stcb->asoc.control_pdapi = sv;
5529 } else if ((ctl->sinfo_stream == stream) &&
5530 SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5531 /* We are past our victim SSN */
5535 if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5536 /* Update the sequence number */
5537 strm->last_sequence_delivered = sequence;
5539 /* now kick the stream the new way */
5540 /* sa_ignore NO_NULL_CHK */
5541 sctp_kick_prsctp_reorder_queue(stcb, strm);
5543 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5546 * Now slide thing forward.
5548 sctp_slide_mapping_arrays(stcb);