2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
54 * NOTES: On the outbound side of things I need to check the sack timer to
55 * see if I should generate a sack into the chunk queue (if I have data to
56 * send that is and will be sending it .. for bundling.
58 * The callback in sctp_usrreq.c will get called when the socket is read from.
59 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: merge a reassembly chunk (chk) into an existing
 * readq control entry; defined near the end of this file.  lock_held
 * tells the callee whether the inp read lock is already taken.
 */
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64 struct sctp_stream_in *strm,
65 struct sctp_tcb *stcb,
66 struct sctp_association *asoc,
67 struct sctp_tmit_chunk *chk, int lock_held);
/*
 * Recompute the association's advertised receive window and cache it
 * in asoc->my_rwnd.
 * NOTE(review): extraction gap — the original braces/return type lines
 * are missing from this view (file lines 68-74 are partially absent).
 */
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 /* Calculate what the rwnd would be */
/*
 * Returns the receive window to advertise: the full socket buffer limit
 * when everything is drained, otherwise the actual free sb space minus
 * the bytes (plus per-chunk MSIZE overhead) still held on the reasm and
 * in-stream queues, and minus rwnd control overhead.
 * NOTE(review): extraction gap — early-return for the NULL-socket case
 * and the final return/SWS clamp body are missing from this view.
 */
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
83 * This is really set wrong with respect to a 1-2-m socket. Since
84 * the sb_cc is the count that everyone as put up. When we re-write
85 * sctp_soreceive then we will fix this so that ONLY this
86 * associations data is taken into account.
88 if (stcb->sctp_socket == NULL) {
91 if (stcb->asoc.sb_cc == 0 &&
92 asoc->size_on_reasm_queue == 0 &&
93 asoc->size_on_all_streams == 0) {
94 /* Full rwnd granted */
95 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 /* get actual space */
99 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
101 * take out what has NOT been put on socket queue and we yet hold
104 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 asoc->cnt_on_reasm_queue * MSIZE));
106 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 asoc->cnt_on_all_streams * MSIZE));
112 /* what is the overhead of all these rwnd's */
113 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
115 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 * even it is 0. SWS engaged
118 if (calc < stcb->asoc.my_rwnd_control_len) {
127 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a sctp_queued_to_read entry for an arriving
 * DATA chunk.  Takes a reference on 'net' (whoFrom); returns NULL if
 * allocation fails.  top_fsn/fsn_included start at 0xffffffff, i.e.
 * "no fragment seen yet".
 * NOTE(review): the 'dm' (data mbuf) parameter line is missing from
 * this view — extraction gap between file lines 134 and 137.
 */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131 struct sctp_nets *net,
132 uint32_t tsn, uint32_t ppid,
133 uint32_t context, uint16_t sid,
134 uint32_t mid, uint8_t flags,
137 struct sctp_queued_to_read *read_queue_e = NULL;
139 sctp_alloc_a_readq(stcb, read_queue_e);
140 if (read_queue_e == NULL) {
143 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 read_queue_e->sinfo_stream = sid;
/* sinfo_flags keeps chunk flags in the upper byte, per sinfo convention */
145 read_queue_e->sinfo_flags = (flags << 8);
146 read_queue_e->sinfo_ppid = ppid;
147 read_queue_e->sinfo_context = context;
148 read_queue_e->sinfo_tsn = tsn;
149 read_queue_e->sinfo_cumtsn = tsn;
150 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
151 read_queue_e->mid = mid;
152 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 TAILQ_INIT(&read_queue_e->reasm);
154 read_queue_e->whoFrom = net;
155 atomic_add_int(&net->ref_count, 1);
156 read_queue_e->data = dm;
157 read_queue_e->stcb = stcb;
158 read_queue_e->port_from = stcb->rport;
160 return (read_queue_e);
/*
 * Build the ancillary-data (cmsg) mbuf for a received message, honoring
 * which notification features the endpoint has enabled: SCTP_RCVINFO,
 * SCTP_NXTINFO (only when next-message info is available), and the
 * legacy SCTP_SNDRCV / SCTP_EXTRCV.  Returns NULL when the user wants
 * no ancillary data; otherwise returns an mbuf with the cmsg chain.
 * NOTE(review): extraction gaps — local declarations (len, ret, cmh),
 * the allocation-failure check after sctp_get_mbuf_for_msg, and several
 * closing braces are missing from this view.
 */
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
166 struct sctp_extrcvinfo *seinfo;
167 struct sctp_sndrcvinfo *outinfo;
168 struct sctp_rcvinfo *rcvinfo;
169 struct sctp_nxtinfo *nxtinfo;
176 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 /* user does not want any ancillary data */
/* First pass: compute total CMSG_SPACE needed for all enabled cmsgs */
183 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
186 seinfo = (struct sctp_extrcvinfo *)sinfo;
187 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
190 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
194 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
197 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
206 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 SCTP_BUF_LEN(ret) = 0;
213 /* We need a CMSG header followed by the struct */
214 cmh = mtod(ret, struct cmsghdr *);
216 * Make sure that there is no un-initialized padding between the
217 * cmsg header and cmsg data and after the cmsg data.
220 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 cmh->cmsg_level = IPPROTO_SCTP;
222 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 cmh->cmsg_type = SCTP_RCVINFO;
224 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 rcvinfo->rcv_context = sinfo->sinfo_context;
232 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
/* advance cmh past this cmsg (header + data + padding) */
233 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
237 cmh->cmsg_level = IPPROTO_SCTP;
238 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 cmh->cmsg_type = SCTP_NXTINFO;
240 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 nxtinfo->nxt_flags = 0;
243 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 nxtinfo->nxt_flags |= SCTP_UNORDERED;
246 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 nxtinfo->nxt_flags |= SCTP_COMPLETE;
252 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
262 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 cmh->cmsg_type = SCTP_EXTRCV;
264 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 cmh->cmsg_type = SCTP_SNDRCV;
270 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a delivered TSN from the revokable mapping_array to the
 * non-revokable nr_mapping_array, and maintain highest_tsn_inside_map /
 * highest_tsn_inside_nr_map.  No-op when the TSN is already behind the
 * cumulative ack or when sctp_do_drain is disabled.
 * NOTE(review): extraction gaps — early returns, #ifdef INVARIANTS
 * markers around the panic, and closing braces are missing here.
 */
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
280 uint32_t gap, i, cumackp1;
282 int in_r = 0, in_nr = 0;
284 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 cumackp1 = asoc->cumulative_tsn + 1;
288 if (SCTP_TSN_GT(cumackp1, tsn)) {
290 * this tsn is behind the cum ack and thus we don't need to
291 * worry about it being moved from one to the other.
295 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* TSN should be present in at least one map; anything else is a bug */
298 if ((in_r == 0) && (in_nr == 0)) {
300 panic("Things are really messed up now");
302 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 sctp_print_mapping_array(asoc);
307 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 asoc->highest_tsn_inside_nr_map = tsn;
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
/* no revokable TSN left at all: park just below the base */
324 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert a control entry into the correct stream queue (unordered queue
 * or ordered inqueue), sorted by MID.  Presumably returns 0 on success
 * and non-zero when a duplicate MID is found (callers abort on that) —
 * the return statements are not visible in this extraction; confirm.
 * NOTE(review): extraction gaps — the ordered-queue selection branch
 * (q = &strm->inqueue) and several returns/braces are missing here.
 */
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t bits, unordered;
/* chunk flags live in the upper byte of sinfo_flags */
338 bits = (control->sinfo_flags >> 8);
339 unordered = bits & SCTP_DATA_UNORDERED;
341 q = &strm->uno_inqueue;
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
344 /* Only one stream can be here in old style
348 TAILQ_INSERT_TAIL(q, control, next_instrm);
349 control->on_strm_q = SCTP_ON_UNORDERED;
/* Non-fragmented messages are complete on arrival */
355 if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
356 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
358 if (TAILQ_EMPTY(q)) {
360 TAILQ_INSERT_HEAD(q, control, next_instrm);
362 control->on_strm_q = SCTP_ON_UNORDERED;
364 control->on_strm_q = SCTP_ON_ORDERED;
368 TAILQ_FOREACH(at, q, next_instrm) {
369 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
371 * one in queue is bigger than the new one,
372 * insert before this one
374 TAILQ_INSERT_BEFORE(at, control, next_instrm);
376 control->on_strm_q = SCTP_ON_UNORDERED;
378 control->on_strm_q = SCTP_ON_ORDERED;
381 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
383 * Gak, He sent me a duplicate msg id
384 * number?? return -1 to abort.
388 if (TAILQ_NEXT(at, next_instrm) == NULL) {
390 * We are at the end, insert it
393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
394 sctp_log_strm_del(control, at,
395 SCTP_STR_LOG_FROM_INSERT_TL);
397 TAILQ_INSERT_AFTER(q,
398 at, control, next_instrm);
400 control->on_strm_q = SCTP_ON_UNORDERED;
402 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association due to a reassembly protocol violation at
 * 'opspot'.  Builds a diagnostic string (I-DATA vs legacy DATA format),
 * frees the offending chunk, generates a PROTOCOL_VIOLATION cause and
 * tears the association down.  Callers must treat stcb as gone after
 * this (abort_flag is presumably set in the missing lines — confirm).
 * NOTE(review): extraction gaps — 'oper' declaration, chk->data = NULL
 * after the free, and *abort_flag assignment are not visible here.
 */
413 sctp_abort_in_reasm(struct sctp_tcb *stcb,
414 struct sctp_queued_to_read *control,
415 struct sctp_tmit_chunk *chk,
416 int *abort_flag, int opspot)
418 char msg[SCTP_DIAG_INFO_LEN];
421 if (stcb->asoc.idata_supported) {
422 snprintf(msg, sizeof(msg),
423 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
425 control->fsn_included,
428 chk->rec.data.fsn, chk->rec.data.mid);
430 snprintf(msg, sizeof(msg),
431 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
433 control->fsn_included,
437 (uint16_t) chk->rec.data.mid);
439 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
440 sctp_m_freem(chk->data);
442 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
443 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
444 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Dispose of a control entry that could not be queued: free every
 * chunk still sitting on its reasm list, then free the control itself.
 */
449 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
452 * The control could not be placed and must be cleaned.
454 struct sctp_tmit_chunk *chk, *nchk;
456 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
457 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
459 sctp_m_freem(chk->data);
461 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
463 sctp_free_a_readq(stcb, control);
467 * Queue the chunk either right into the socket buffer if it is the next one
468 * to go OR put it in the correct place in the delivery queue. If we do
469 * append to the so_buf, keep doing so until we are out of order as
470 * long as the control's entered are non-fragmented.
/*
 * Deliver an in-order complete message straight to the read queue
 * (then drain any now-in-order successors), or otherwise place it in
 * the stream's ordered queue.  Aborts the association on a MID behind
 * the last-delivered MID or on a duplicate MID in the stream queue.
 * NOTE(review): extraction gaps throughout — local declarations
 * (op_err, nxt_todel, so, queue_needed), several returns, the SCTP_LOC
 * abort-flag sets, and many closing braces are missing from this view.
 */
473 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
474 struct sctp_stream_in *strm,
475 struct sctp_association *asoc,
476 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
479 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
480 * all the data in one stream this could happen quite rapidly. One
481 * could use the TSN to keep track of things, but this scheme breaks
482 * down in the other type of stream usage that could occur. Send a
483 * single msg to stream 0, send 4Billion messages to stream 1, now
484 * send a message to stream 0. You have a situation where the TSN
485 * has wrapped but not in the stream. Is this worth worrying about
486 * or should we just change our queue sort at the bottom to be by
489 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
490 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
491 * assignment this could happen... and I don't see how this would be
492 * a violation. So for now I am undecided an will leave the sort by
493 * SSN alone. Maybe a hybred approach is the answer
496 struct sctp_queued_to_read *at;
500 char msg[SCTP_DIAG_INFO_LEN];
502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
503 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* A MID behind last delivery means the peer re-used a sequence number */
505 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
506 /* The incoming sseq is behind where we last delivered? */
507 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
508 control->mid, strm->last_mid_delivered);
511 * throw it in the stream so it gets cleaned up in
512 * association destruction
514 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
515 if (asoc->idata_supported) {
516 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
517 strm->last_mid_delivered, control->sinfo_tsn,
518 control->sinfo_stream, control->mid);
520 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
521 (uint16_t) strm->last_mid_delivered,
523 control->sinfo_stream,
524 (uint16_t) control->mid);
526 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
527 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
528 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
533 if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
537 asoc->size_on_all_streams += control->length;
538 sctp_ucount_incr(asoc->cnt_on_all_streams);
539 nxt_todel = strm->last_mid_delivered + 1;
540 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
541 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock dance: drop tcb lock, take socket lock, re-check state */
544 so = SCTP_INP_SO(stcb->sctp_ep);
545 atomic_add_int(&stcb->asoc.refcnt, 1);
546 SCTP_TCB_UNLOCK(stcb);
547 SCTP_SOCKET_LOCK(so, 1);
549 atomic_subtract_int(&stcb->asoc.refcnt, 1);
550 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
551 SCTP_SOCKET_UNLOCK(so, 1);
555 /* can be delivered right away? */
556 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
557 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
559 /* EY it wont be queued if it could be delivered directly */
561 asoc->size_on_all_streams -= control->length;
562 sctp_ucount_decr(asoc->cnt_on_all_streams);
563 strm->last_mid_delivered++;
564 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
565 sctp_add_to_readq(stcb->sctp_ep, stcb,
567 &stcb->sctp_socket->so_rcv, 1,
568 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain any queued messages that are now the next in order */
569 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
571 nxt_todel = strm->last_mid_delivered + 1;
572 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
573 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
574 asoc->size_on_all_streams -= control->length;
575 sctp_ucount_decr(asoc->cnt_on_all_streams);
576 if (control->on_strm_q == SCTP_ON_ORDERED) {
577 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
580 panic("Huh control: %p is on_strm_q: %d",
581 control, control->on_strm_q);
584 control->on_strm_q = 0;
585 strm->last_mid_delivered++;
587 * We ignore the return of deliver_data here
588 * since we always can hold the chunk on the
589 * d-queue. And we have a finite number that
590 * can be delivered from the strq.
592 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
593 sctp_log_strm_del(control, NULL,
594 SCTP_STR_LOG_FROM_IMMED_DEL);
596 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
597 sctp_add_to_readq(stcb->sctp_ep, stcb,
599 &stcb->sctp_socket->so_rcv, 1,
600 SCTP_READ_LOCK_NOT_HELD,
603 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
608 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
609 SCTP_SOCKET_UNLOCK(so, 1);
614 * Ok, we did not deliver this guy, find the correct place
615 * to put it on the queue.
617 if (sctp_place_control_in_stream(strm, asoc, control)) {
618 snprintf(msg, sizeof(msg),
619 "Queue to str MID: %u duplicate",
621 sctp_clean_up_control(stcb, control);
622 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
623 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
624 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk control->data, dropping zero-length mbufs, summing the mbuf
 * lengths into control->length, charging the socket buffer when the
 * control is already on the read queue, and recording the last mbuf
 * in control->tail_mbuf.
 * NOTE(review): extraction gaps — the 'm = control->data' initializer,
 * the while-loop header, and the length reset are missing here.
 */
632 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
634 struct mbuf *m, *prev = NULL;
635 struct sctp_tcb *stcb;
637 stcb = control->stcb;
638 control->held_length = 0;
642 if (SCTP_BUF_LEN(m) == 0) {
643 /* Skip mbufs with NO length */
646 control->data = sctp_m_free(m);
649 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
650 m = SCTP_BUF_NEXT(prev);
653 control->tail_mbuf = prev;
658 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
659 if (control->on_read_q) {
661 * On read queue so we must increment the SB stuff,
662 * we assume caller has done any locks of SB.
664 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
666 m = SCTP_BUF_NEXT(m);
669 control->tail_mbuf = prev;
/*
 * Append mbuf chain 'm' to control->data via the cached tail pointer,
 * skipping zero-length mbufs, updating control->length and tail_mbuf,
 * and charging the socket buffer if the control is on the read queue.
 * Falls back to sctp_setup_tail_pointer() when no tail is cached yet.
 * NOTE(review): extraction gaps — the stcb NULL check around the panic,
 * loop headers, and several braces are missing from this view.
 */
674 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
676 struct mbuf *prev = NULL;
677 struct sctp_tcb *stcb;
679 stcb = control->stcb;
682 panic("Control broken");
687 if (control->tail_mbuf == NULL) {
690 sctp_setup_tail_pointer(control);
693 control->tail_mbuf->m_next = m;
695 if (SCTP_BUF_LEN(m) == 0) {
696 /* Skip mbufs with NO length */
699 control->tail_mbuf->m_next = sctp_m_free(m);
700 m = control->tail_mbuf->m_next;
702 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
703 m = SCTP_BUF_NEXT(prev);
706 control->tail_mbuf = prev;
711 if (control->on_read_q) {
713 * On read queue so we must increment the SB stuff,
714 * we assume caller has done any locks of SB.
716 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
718 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
719 m = SCTP_BUF_NEXT(m);
722 control->tail_mbuf = prev;
/*
 * Clone the identifying fields of 'control' into a freshly zeroed
 * readq entry 'nc' (no data is copied); takes a reference on the
 * source's whoFrom net.  fsn_included resets to 0xffffffff, i.e. the
 * new entry has not yet absorbed any fragment.
 */
727 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
729 memset(nc, 0, sizeof(struct sctp_queued_to_read));
730 nc->sinfo_stream = control->sinfo_stream;
731 nc->mid = control->mid;
732 TAILQ_INIT(&nc->reasm);
733 nc->top_fsn = control->top_fsn;
/* mid assigned twice in the original; second one is redundant */
734 nc->mid = control->mid;
735 nc->sinfo_flags = control->sinfo_flags;
736 nc->sinfo_ppid = control->sinfo_ppid;
737 nc->sinfo_context = control->sinfo_context;
738 nc->fsn_included = 0xffffffff;
739 nc->sinfo_tsn = control->sinfo_tsn;
740 nc->sinfo_cumtsn = control->sinfo_cumtsn;
741 nc->sinfo_assoc_id = control->sinfo_assoc_id;
742 nc->whoFrom = control->whoFrom;
743 atomic_add_int(&nc->whoFrom->ref_count, 1);
744 nc->stcb = control->stcb;
745 nc->port_from = control->port_from;
/*
 * Reset a control's fsn_included to 'tsn' and, if it was already put
 * on the endpoint's read queue, pull it back off (caller presumably
 * holds the read-queue lock — confirm against callers).
 */
749 sctp_reset_a_control(struct sctp_queued_to_read *control,
750 struct sctp_inpcb *inp, uint32_t tsn)
752 control->fsn_included = tsn;
753 if (control->on_read_q) {
755 * We have to purge it from there, hopefully this will work
758 TAILQ_REMOVE(&inp->read_queue, control, next);
759 control->on_read_q = 0;
/*
 * Reassembly driver for legacy (pre I-DATA) unordered data, where all
 * fragments share MID 0: absorb consecutive FSNs into 'control', and
 * when the message completes, spill any leftover fragments onto a new
 * control entry (nc) so the next message can keep reassembling.  Also
 * starts partial delivery (PD-API) once the pending length crosses
 * pd_point.  Returns non-zero to stop the caller scanning further
 * unordered entries (exact return sites are not all visible — confirm).
 * NOTE(review): extraction gaps — the pd_point parameter line, 'fsn'
 * declaration, allocation-failure handling, and many braces/returns
 * are missing from this view.
 */
764 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
765 struct sctp_association *asoc,
766 struct sctp_stream_in *strm,
767 struct sctp_queued_to_read *control,
769 int inp_read_lock_held)
772 * Special handling for the old un-ordered data chunk. All the
773 * chunks/TSN's go to mid 0. So we have to do the old style watching
774 * to see if we have it all. If you return one, no other control
775 * entries on the un-ordered queue will be looked at. In theory
776 * there should be no others entries in reality, unless the guy is
777 * sending both unordered NDATA and unordered DATA...
779 struct sctp_tmit_chunk *chk, *lchk, *tchk;
781 struct sctp_queued_to_read *nc;
784 if (control->first_frag_seen == 0) {
785 /* Nothing we can do, we have not seen the first piece yet */
788 /* Collapse any we can */
791 fsn = control->fsn_included + 1;
792 /* Now what can we add? */
793 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
794 if (chk->rec.data.fsn == fsn) {
796 sctp_alloc_a_readq(stcb, nc);
800 memset(nc, 0, sizeof(struct sctp_queued_to_read));
801 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
802 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
806 if (control->end_added) {
808 if (!TAILQ_EMPTY(&control->reasm)) {
810 * Ok we have to move anything left
811 * on the control queue to a new
814 sctp_build_readq_entry_from_ctl(nc, control);
815 tchk = TAILQ_FIRST(&control->reasm);
816 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
817 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
818 asoc->size_on_reasm_queue -= tchk->send_size;
819 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
/* Leftover first fragment seeds the new control */
820 nc->first_frag_seen = 1;
821 nc->fsn_included = tchk->rec.data.fsn;
822 nc->data = tchk->data;
823 nc->sinfo_ppid = tchk->rec.data.ppid;
824 nc->sinfo_tsn = tchk->rec.data.tsn;
825 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
827 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
828 sctp_setup_tail_pointer(nc);
829 tchk = TAILQ_FIRST(&control->reasm);
831 /* Spin the rest onto the queue */
833 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
834 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
835 tchk = TAILQ_FIRST(&control->reasm);
837 /* Now lets add it to the queue
838 * after removing control */
839 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
840 nc->on_strm_q = SCTP_ON_UNORDERED;
841 if (control->on_strm_q) {
842 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
843 control->on_strm_q = 0;
846 if (control->pdapi_started) {
847 strm->pd_api_started = 0;
848 control->pdapi_started = 0;
850 if (control->on_strm_q) {
851 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
852 control->on_strm_q = 0;
853 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
855 if (control->on_read_q == 0) {
856 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
857 &stcb->sctp_socket->so_rcv, control->end_added,
858 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
860 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
861 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
862 /* Switch to the new guy and
867 if (nc->on_strm_q == 0) {
868 sctp_free_a_readq(stcb, nc);
873 sctp_free_a_readq(stcb, nc);
/* PD-API: hand partial data to the reader once it crosses pd_point */
880 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
881 strm->pd_api_started = 1;
882 control->pdapi_started = 1;
883 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
884 &stcb->sctp_socket->so_rcv, control->end_added,
885 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
886 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Insert a legacy-unordered fragment into a control's reassembly state.
 * A FIRST_FRAG either seeds the control, replaces (swaps with) an
 * existing smaller first fragment, or aborts if it collides with the
 * included FSN / an in-progress PD-API.  Non-first fragments are
 * inserted into the reasm list sorted by FSN; duplicates abort.
 * NOTE(review): extraction gaps — the trailing abort_flag parameter,
 * 'tdata'/'tmp'/'inserted' declarations, chk->data = tdata in the swap,
 * and several braces/returns are missing from this view.
 */
894 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
895 struct sctp_association *asoc,
896 struct sctp_queued_to_read *control,
897 struct sctp_tmit_chunk *chk,
900 struct sctp_tmit_chunk *at;
904 * Here we need to place the chunk into the control structure sorted
905 * in the correct order.
907 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
908 /* Its the very first one. */
909 SCTPDBG(SCTP_DEBUG_XXX,
910 "chunk is a first fsn: %u becomes fsn_included\n",
912 if (control->first_frag_seen) {
914 * In old un-ordered we can reassembly on one
915 * control multiple messages. As long as the next
916 * FIRST is greater then the old first (TSN i.e. FSN
922 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
923 /* Easy way the start of a new guy beyond
927 if ((chk->rec.data.fsn == control->fsn_included) ||
928 (control->pdapi_started)) {
930 * Ok this should not happen, if it does we
931 * started the pd-api on the higher TSN
932 * (since the equals part is a TSN failure
935 * We are completly hosed in that case since
936 * I have no way to recover. This really
937 * will only happen if we can get more TSN's
938 * higher before the pd-api-point.
940 sctp_abort_in_reasm(stcb, control, chk,
942 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
947 * Ok we have two firsts and the one we just got is
948 * smaller than the one we previously placed.. yuck!
949 * We must swap them out.
/* Swap data, FSN, TSN and PPID between control and chk */
952 tdata = control->data;
953 control->data = chk->data;
955 /* Save the lengths */
956 chk->send_size = control->length;
957 /* Recompute length of control and tail pointer */
958 sctp_setup_tail_pointer(control);
959 /* Fix the FSN included */
960 tmp = control->fsn_included;
961 control->fsn_included = chk->rec.data.fsn;
962 chk->rec.data.fsn = tmp;
963 /* Fix the TSN included */
964 tmp = control->sinfo_tsn;
965 control->sinfo_tsn = chk->rec.data.tsn;
966 chk->rec.data.tsn = tmp;
967 /* Fix the PPID included */
968 tmp = control->sinfo_ppid;
969 control->sinfo_ppid = chk->rec.data.ppid;
970 chk->rec.data.ppid = tmp;
971 /* Fix tail pointer */
974 control->first_frag_seen = 1;
975 control->top_fsn = control->fsn_included = chk->rec.data.fsn;
976 control->sinfo_tsn = chk->rec.data.tsn;
977 control->sinfo_ppid = chk->rec.data.ppid;
978 control->data = chk->data;
979 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
981 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
982 sctp_setup_tail_pointer(control);
987 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
988 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
990 * This one in queue is bigger than the new one,
991 * insert the new one before at.
993 asoc->size_on_reasm_queue += chk->send_size;
994 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
996 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
998 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1000 * They sent a duplicate fsn number. This really
1001 * should not happen since the FSN is a TSN and it
1002 * should have been dropped earlier.
1004 sctp_abort_in_reasm(stcb, control, chk,
1006 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1010 if (inserted == 0) {
1011 /* Its at the end */
1012 asoc->size_on_reasm_queue += chk->send_size;
1013 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1014 control->top_fsn = chk->rec.data.fsn;
1015 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * Scan a stream's unordered and ordered queues for reassembled messages
 * that are ready, moving completed entries to the endpoint read queue
 * and starting PD-API for large incomplete ones.  Unordered entries are
 * delivered regardless of MID; ordered entries only when their MID is
 * the next to deliver.  pd_point is the partial-delivery threshold
 * derived from the socket buffer limit and endpoint setting.
 * NOTE(review): extraction gaps — loop headers over the queues, the
 * pd_point/done declarations, INVARIANTS markers around the panics,
 * and many braces/returns are missing from this view.
 */
1020 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1021 struct sctp_stream_in *strm, int inp_read_lock_held)
1024 * Given a stream, strm, see if any of the SSN's on it that are
1025 * fragmented are ready to deliver. If so go ahead and place them on
1026 * the read queue. In so placing if we have hit the end, then we
1027 * need to remove them from the stream's queue.
1029 struct sctp_queued_to_read *control, *nctl = NULL;
1030 uint32_t next_to_del;
1034 if (stcb->sctp_socket) {
1035 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1036 stcb->sctp_ep->partial_delivery_point);
1038 pd_point = stcb->sctp_ep->partial_delivery_point;
1040 control = TAILQ_FIRST(&strm->uno_inqueue);
1043 (asoc->idata_supported == 0)) {
1044 /* Special handling needed for "old" data format */
1045 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1049 if (strm->pd_api_started) {
1050 /* Can't add more */
1054 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1055 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1056 nctl = TAILQ_NEXT(control, next_instrm);
1057 if (control->end_added) {
1058 /* We just put the last bit on */
1059 if (control->on_strm_q) {
1061 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1062 panic("Huh control: %p on_q: %d -- not unordered?",
1063 control, control->on_strm_q);
1066 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1067 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1068 control->on_strm_q = 0;
1070 if (control->on_read_q == 0) {
1071 sctp_add_to_readq(stcb->sctp_ep, stcb,
1073 &stcb->sctp_socket->so_rcv, control->end_added,
1074 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1077 /* Can we do a PD-API for this un-ordered guy? */
1078 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1079 strm->pd_api_started = 1;
1080 control->pdapi_started = 1;
1081 sctp_add_to_readq(stcb->sctp_ep, stcb,
1083 &stcb->sctp_socket->so_rcv, control->end_added,
1084 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
/* Now the ordered queue: deliver only in MID order */
1092 control = TAILQ_FIRST(&strm->inqueue);
1093 if (strm->pd_api_started) {
1094 /* Can't add more */
1097 if (control == NULL) {
1100 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1102 * Ok the guy at the top was being partially delivered
1103 * completed, so we remove it. Note the pd_api flag was
1104 * taken off when the chunk was merged on in
1105 * sctp_queue_data_for_reasm below.
1107 nctl = TAILQ_NEXT(control, next_instrm);
1108 SCTPDBG(SCTP_DEBUG_XXX,
1109 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1110 control, control->end_added, control->mid,
1111 control->top_fsn, control->fsn_included,
1112 strm->last_mid_delivered);
1113 if (control->end_added) {
1114 if (control->on_strm_q) {
1116 if (control->on_strm_q != SCTP_ON_ORDERED) {
1117 panic("Huh control: %p on_q: %d -- not ordered?",
1118 control, control->on_strm_q);
1121 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1122 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1123 control->on_strm_q = 0;
1125 if (strm->pd_api_started && control->pdapi_started) {
1126 control->pdapi_started = 0;
1127 strm->pd_api_started = 0;
1129 if (control->on_read_q == 0) {
1130 sctp_add_to_readq(stcb->sctp_ep, stcb,
1132 &stcb->sctp_socket->so_rcv, control->end_added,
1133 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1138 if (strm->pd_api_started) {
1139 /* Can't add more must have gotten an un-ordered above being
1140 * partially delivered. */
1144 next_to_del = strm->last_mid_delivered + 1;
1146 SCTPDBG(SCTP_DEBUG_XXX,
1147 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1148 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1150 nctl = TAILQ_NEXT(control, next_instrm);
1151 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1152 (control->first_frag_seen)) {
1155 /* Ok we can deliver it onto the stream. */
1156 if (control->end_added) {
1157 /* We are done with it afterwards */
1158 if (control->on_strm_q) {
1160 if (control->on_strm_q != SCTP_ON_ORDERED) {
1161 panic("Huh control: %p on_q: %d -- not ordered?",
1162 control, control->on_strm_q);
1165 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1166 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1167 control->on_strm_q = 0;
1171 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1172 /* A singleton now slipping through - mark
1173 * it non-revokable too */
1174 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1175 } else if (control->end_added == 0) {
1176 /* Check if we can defer adding until its
1178 if ((control->length < pd_point) || (strm->pd_api_started)) {
1179 /* Don't need it or cannot add more
1180 * (one being delivered that way) */
1184 done = (control->end_added) && (control->last_frag_seen);
1185 if (control->on_read_q == 0) {
1186 sctp_add_to_readq(stcb->sctp_ep, stcb,
1188 &stcb->sctp_socket->so_rcv, control->end_added,
1189 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1191 strm->last_mid_delivered = next_to_del;
1196 /* We are now doing PD API */
1197 strm->pd_api_started = 1;
1198 control->pdapi_started = 1;
/*
 * sctp_add_chk_to_control:
 * Merge the data mbufs of fragment 'chk' onto the read-queue entry
 * 'control' and free the chunk shell.  Updates the association's
 * reassembly accounting and, when the LAST fragment arrives, clears
 * any partial-delivery (PD-API) state and unlinks the control from
 * whichever per-stream queue (ordered or unordered) it sits on.
 */
1208 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1209 struct sctp_stream_in *strm,
1210 struct sctp_tcb *stcb, struct sctp_association *asoc,
1211 struct sctp_tmit_chunk *chk, int hold_rlock)
1214 * Given a control and a chunk, merge the data from the chk onto the
1215 * control and free up the chunk resources.
/*
 * The control is already visible to the socket reader (partial
 * delivery in progress); take the inp read lock ourselves unless the
 * caller says it already holds it (hold_rlock != 0).
 */
1219 if (control->on_read_q && (hold_rlock == 0)) {
1221 * Its being pd-api'd so we must do some locks.
1223 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First data for this message, otherwise append via the cached tail. */
1226 if (control->data == NULL) {
1227 control->data = chk->data;
1228 sctp_setup_tail_pointer(control);
1230 sctp_add_to_tail_pointer(control, chk->data);
/* Record the FSN just merged and retire the chunk from reasm accounting. */
1232 control->fsn_included = chk->rec.data.fsn;
1233 asoc->size_on_reasm_queue -= chk->send_size;
1234 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1235 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1237 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1238 control->first_frag_seen = 1;
/*
 * Last fragment: end any partial-delivery episode and pull the control
 * off its stream queue so the reader owns the complete message.
 */
1240 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1242 if ((control->on_strm_q) && (control->on_read_q)) {
1243 if (control->pdapi_started) {
1244 control->pdapi_started = 0;
1245 strm->pd_api_started = 0;
1247 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1249 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1250 control->on_strm_q = 0;
1251 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1253 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1254 control->on_strm_q = 0;
/* Any other on_strm_q tag means the queue state machine is corrupt. */
1256 } else if (control->on_strm_q) {
1257 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1258 control->on_strm_q);
1262 control->end_added = 1;
1263 control->last_frag_seen = 1;
/* Release the read lock only if we acquired it above. */
1266 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* The data mbufs now belong to 'control'; free only the chunk shell. */
1268 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/*
 * sctp_queue_data_for_reasm:
 * Insert fragment 'chk' into the reassembly state for message
 * 'control'.  The FIRST fragment's data is attached directly to the
 * control; other fragments are kept FSN-sorted on control->reasm.
 * Protocol violations (duplicate FIRST, duplicate FSN, FSN at or
 * beyond an already-seen LAST, data behind the delivered FSN) abort
 * the association via sctp_abort_in_reasm() and set *abort_flag.
 * After insertion, any fragments that have become in-sequence are
 * folded into the control and the reader is woken if needed.
 * 'created_control' is non-zero when the caller just allocated
 * 'control' for this chunk.
 */
1272 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1273 * queue, see if anthing can be delivered. If so pull it off (or as much as
1274 * we can. If we run out of space then we must dump what we can and set the
1275 * appropriate flag to say we queued what we could.
1278 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1279 struct sctp_stream_in *strm,
1280 struct sctp_queued_to_read *control,
1281 struct sctp_tmit_chunk *chk,
1282 int created_control,
1283 int *abort_flag, uint32_t tsn)
1286 struct sctp_tmit_chunk *at, *nat;
1287 int do_wakeup, unordered;
1290 * For old un-ordered data chunks.
1292 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
/*
 * Newly created controls must first be linked into the stream-in
 * queue; failure here indicates a duplicate SSN from the peer.
 */
1297 /* Must be added to the stream-in queue */
1298 if (created_control) {
1299 if (sctp_place_control_in_stream(strm, asoc, control)) {
1300 /* Duplicate SSN? */
1301 sctp_clean_up_control(stcb, control);
1302 sctp_abort_in_reasm(stcb, control, chk,
1304 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
/*
 * Old-DATA only: if this TSN is next after the cum-ack, the chunk
 * must carry the B (first-fragment) bit or the peer is broken.
 */
1307 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1309 * Ok we created this control and now lets validate
1310 * that its legal i.e. there is a B bit set, if not
1311 * and we have up to the cum-ack then its invalid.
1313 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1314 sctp_abort_in_reasm(stcb, control, chk,
1316 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old un-ordered DATA takes a separate injection path. */
1321 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1322 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1326 * Ok we must queue the chunk into the reasembly portion: o if its
1327 * the first it goes to the control mbuf. o if its not first but the
1328 * next in sequence it goes to the control, and each succeeding one
1329 * in order also goes. o if its not in order we place it on the list
/* FIRST fragment: its data seeds the control directly. */
1332 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1333 /* Its the very first one. */
1334 SCTPDBG(SCTP_DEBUG_XXX,
1335 "chunk is a first fsn: %u becomes fsn_included\n",
1337 if (control->first_frag_seen) {
1339 * Error on senders part, they either sent us two
1340 * data chunks with FIRST, or they sent two
1341 * un-ordered chunks that were fragmented at the
1342 * same time in the same stream.
1344 sctp_abort_in_reasm(stcb, control, chk,
1346 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1349 control->first_frag_seen = 1;
1350 control->fsn_included = chk->rec.data.fsn;
1351 control->data = chk->data;
1352 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1354 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1355 sctp_setup_tail_pointer(control);
1357 /* Place the chunk in our list */
/*
 * Non-first fragment: sanity-check its FSN against what we have
 * already merged/seen, then insert into the sorted reasm list.
 */
1360 if (control->last_frag_seen == 0) {
1361 /* Still willing to raise highest FSN seen */
1362 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1363 SCTPDBG(SCTP_DEBUG_XXX,
1364 "We have a new top_fsn: %u\n",
1366 control->top_fsn = chk->rec.data.fsn;
1368 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1369 SCTPDBG(SCTP_DEBUG_XXX,
1370 "The last fsn is now in place fsn: %u\n",
1372 control->last_frag_seen = 1;
1374 if (asoc->idata_supported || control->first_frag_seen) {
1376 * For IDATA we always check since we know
1377 * that the first fragment is 0. For old
1378 * DATA we have to receive the first before
1379 * we know the first FSN (which is the TSN).
1381 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1382 /* We have already delivered up to
1383 * this so its a dup */
1384 sctp_abort_in_reasm(stcb, control, chk,
1386 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/* A LAST fragment was already recorded: any second LAST is fatal. */
1391 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1392 /* Second last? huh? */
1393 SCTPDBG(SCTP_DEBUG_XXX,
1394 "Duplicate last fsn: %u (top: %u) -- abort\n",
1395 chk->rec.data.fsn, control->top_fsn);
1396 sctp_abort_in_reasm(stcb, control,
1398 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1401 if (asoc->idata_supported || control->first_frag_seen) {
1403 * For IDATA we always check since we know
1404 * that the first fragment is 0. For old
1405 * DATA we have to receive the first before
1406 * we know the first FSN (which is the TSN).
1409 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1410 /* We have already delivered up to
1411 * this so its a dup */
1412 SCTPDBG(SCTP_DEBUG_XXX,
1413 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1414 chk->rec.data.fsn, control->fsn_included);
1415 sctp_abort_in_reasm(stcb, control, chk,
1417 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1421 /* validate not beyond top FSN if we have seen last
1423 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1424 SCTPDBG(SCTP_DEBUG_XXX,
1425 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1428 sctp_abort_in_reasm(stcb, control, chk,
1430 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1435 * If we reach here, we need to place the new chunk in the
1436 * reassembly for this control.
1438 SCTPDBG(SCTP_DEBUG_XXX,
1439 "chunk is a not first fsn: %u needs to be inserted\n",
/* Walk the FSN-sorted list; insert before the first larger FSN. */
1441 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1442 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1444 * This one in queue is bigger than the new
1445 * one, insert the new one before at.
1447 SCTPDBG(SCTP_DEBUG_XXX,
1448 "Insert it before fsn: %u\n",
1450 asoc->size_on_reasm_queue += chk->send_size;
1451 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1452 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1455 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1456 /* Gak, He sent me a duplicate str seq
1459 * foo bar, I guess I will just free this
1460 * new guy, should we abort too? FIX ME
1461 * MAYBE? Or it COULD be that the SSN's have
1462 * wrapped. Maybe I should compare to TSN
1463 * somehow... sigh for now just blow away
1466 SCTPDBG(SCTP_DEBUG_XXX,
1467 "Duplicate to fsn: %u -- abort\n",
1469 sctp_abort_in_reasm(stcb, control,
1471 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1475 if (inserted == 0) {
1476 /* Goes on the end */
1477 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1479 asoc->size_on_reasm_queue += chk->send_size;
1480 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1481 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1485 * Ok lets see if we can suck any up into the control structure that
1486 * are in seq if it makes sense.
1490 * If the first fragment has not been seen there is no sense in
/*
 * Coalesce: repeatedly move the fragment whose FSN is exactly
 * fsn_included + 1 from the reasm list into the control, updating
 * PD-API state and queueing to the reader when the message completes
 * or grows large enough.
 */
1493 if (control->first_frag_seen) {
1494 next_fsn = control->fsn_included + 1;
1495 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1496 if (at->rec.data.fsn == next_fsn) {
1497 /* We can add this one now to the control */
1498 SCTPDBG(SCTP_DEBUG_XXX,
1499 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1502 next_fsn, control->fsn_included);
1503 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1504 sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1505 if (control->on_read_q) {
1509 if (control->end_added && control->pdapi_started) {
1510 if (strm->pd_api_started) {
1511 strm->pd_api_started = 0;
1512 control->pdapi_started = 0;
1514 if (control->on_read_q == 0) {
1515 sctp_add_to_readq(stcb->sctp_ep, stcb,
1517 &stcb->sctp_socket->so_rcv, control->end_added,
1518 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1529 /* Need to wakeup the reader */
1530 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * sctp_find_reasm_entry:
 * Locate the reassembly control for message 'mid' on stream 'strm'.
 * The ordered inqueue is searched by MID equality; with I-DATA the
 * unordered queue is searched the same way, while for old DATA the
 * head of the unordered queue is taken.  NOTE(review): the use of the
 * 'ordered' parameter and the return paths are not visible in this
 * extract -- presumably each branch returns the matched control, or
 * NULL when nothing matches; confirm against the full source.
 */
1534 static struct sctp_queued_to_read *
1535 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1537 struct sctp_queued_to_read *control;
1540 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1541 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1546 if (idata_supported) {
1547 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1548 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Old DATA: un-ordered fragments all collect on the queue head. */
1553 control = TAILQ_FIRST(&strm->uno_inqueue);
/*
 * sctp_process_a_data_chunk:
 * Validate one inbound DATA or I-DATA chunk and hand its payload to
 * the delivery machinery: straight onto the socket read queue
 * (express delivery), onto the per-stream ordering queues, or onto
 * the per-message reassembly path via sctp_queue_data_for_reasm().
 * Duplicate, empty, out-of-window and protocol-violating chunks are
 * dropped or abort the association (setting *abort_flag).  *high_tsn
 * is raised when this TSN exceeds it.  NOTE(review): return
 * statements are not visible in this extract; confirm the 0/1
 * "chunk accepted" convention against the full source.
 */
1560 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1561 struct mbuf **m, int offset, int chk_length,
1562 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1563 int *break_flag, int last_chunk, uint8_t chtype)
1565 /* Process a data chunk */
1566 /* struct sctp_tmit_chunk *chk; */
1567 struct sctp_data_chunk *ch;
1568 struct sctp_idata_chunk *nch, chunk_buf;
1569 struct sctp_tmit_chunk *chk;
1570 uint32_t tsn, fsn, gap, mid;
1573 int need_reasm_check = 0;
1575 struct mbuf *op_err;
1576 char msg[SCTP_DIAG_INFO_LEN];
1577 struct sctp_queued_to_read *control = NULL;
1579 uint8_t chunk_flags;
1580 struct sctp_stream_reset_list *liste;
1581 struct sctp_stream_in *strm;
1584 int created_control = 0;
/*
 * Phase 1: parse the chunk header.  I-DATA carries a 32-bit MID and
 * an FSN/PPID union; old DATA carries a 16-bit SSN widened into mid.
 */
1587 if (chtype == SCTP_IDATA) {
1588 nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1589 sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1590 ch = (struct sctp_data_chunk *)nch;
1591 clen = sizeof(struct sctp_idata_chunk);
1592 tsn = ntohl(ch->dp.tsn);
1593 mid = ntohl(nch->dp.mid);
1594 ppid = nch->dp.ppid_fsn.ppid;
1595 if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1598 fsn = ntohl(nch->dp.ppid_fsn.fsn);
1600 ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1601 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1602 tsn = ntohl(ch->dp.tsn);
1604 clen = sizeof(struct sctp_data_chunk);
1606 mid = (uint32_t) (ntohs(ch->dp.ssn));
1609 chunk_flags = ch->ch.chunk_flags;
/* A chunk with no user data is a protocol violation: abort. */
1610 if ((size_t)chk_length == clen) {
1612 * Need to send an abort since we had a empty data chunk.
1614 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1615 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1616 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1620 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1621 asoc->send_sack = 1;
1623 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1624 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1625 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1630 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/*
 * Phase 2: duplicate detection.  TSNs at or below the cum-ack, or
 * already marked in either mapping array, are recorded as dups for
 * the next SACK and otherwise ignored.
 */
1631 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1632 /* It is a duplicate */
1633 SCTP_STAT_INCR(sctps_recvdupdata);
1634 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1635 /* Record a dup for the next outbound sack */
1636 asoc->dup_tsns[asoc->numduptsns] = tsn;
1639 asoc->send_sack = 1;
1642 /* Calculate the number of TSN's between the base and this TSN */
1643 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1644 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1645 /* Can't hold the bit in the mapping at max array, toss it */
1648 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1649 SCTP_TCB_LOCK_ASSERT(stcb);
1650 if (sctp_expand_mapping_array(asoc, gap)) {
1651 /* Can't expand, drop it */
1655 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1658 /* See if we have received this one already */
1659 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1660 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1661 SCTP_STAT_INCR(sctps_recvdupdata);
1662 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1663 /* Record a dup for the next outbound sack */
1664 asoc->dup_tsns[asoc->numduptsns] = tsn;
1667 asoc->send_sack = 1;
1671 * Check to see about the GONE flag, duplicates would cause a sack
1672 * to be sent up above
/* Phase 3: no receiver left (socket gone/closed) -> abort the peer. */
1674 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1675 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1676 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1678 * wait a minute, this guy is gone, there is no longer a
1679 * receiver. Send peer an ABORT!
1681 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1682 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1687 * Now before going further we see if there is room. If NOT then we
1688 * MAY let one through only IF this TSN is the one we are waiting
1689 * for on a partial delivery API.
/*
 * Phase 4: stream validation.  An out-of-range SID is reported with
 * an INVALID_STREAM error cause, but the TSN is still marked received
 * (nr_mapping_array) so it gets acked and never retransmitted.
 */
1692 /* Is the stream valid? */
1693 sid = ntohs(ch->dp.sid);
1695 if (sid >= asoc->streamincnt) {
1696 struct sctp_error_invalid_stream *cause;
1698 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1699 0, M_NOWAIT, 1, MT_DATA);
1700 if (op_err != NULL) {
1701 /* add some space up front so prepend will work well */
1702 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1703 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1705 * Error causes are just param's and this one has
1706 * two back to back phdr, one with the error type
1707 * and size, the other with the streamid and a rsvd
1709 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1710 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1711 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1712 cause->stream_id = ch->dp.sid;
1713 cause->reserved = htons(0);
1714 sctp_queue_op_err(stcb, op_err);
1716 SCTP_STAT_INCR(sctps_badsid);
1717 SCTP_TCB_LOCK_ASSERT(stcb);
1718 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1719 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1720 asoc->highest_tsn_inside_nr_map = tsn;
1722 if (tsn == (asoc->cumulative_tsn + 1)) {
1723 /* Update cum-ack */
1724 asoc->cumulative_tsn = tsn;
1728 strm = &asoc->strmin[sid];
1730 * If its a fragmented message, lets see if we can find the control
1731 * on the reassembly queues.
/* I-DATA: only the FIRST fragment may (and must) carry FSN 0. */
1733 if ((chtype == SCTP_IDATA) &&
1734 ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1737 * The first *must* be fsn 0, and other (middle/end) pieces
1738 * can *not* be fsn 0. XXX: This can happen in case of a
1739 * wrap around. Ignore is for now.
1741 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
/*
 * Phase 5: match this chunk against any in-progress reassembly and
 * reject ordered/unordered mismatches or duplicate complete messages.
 */
1745 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
1746 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1747 chunk_flags, control);
1748 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1749 /* See if we can find the re-assembly entity */
1750 if (control != NULL) {
1751 /* We found something, does it belong? */
1752 if (ordered && (mid != control->mid)) {
1753 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1755 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1756 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1757 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1761 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1762 /* We can't have a switched order with an
1763 * unordered chunk */
1764 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1768 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1769 /* We can't have a switched unordered with a
1771 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1778 * Its a complete segment. Lets validate we don't have a
1779 * re-assembly going on with the same Stream/Seq (for
1780 * ordered) or in the same Stream for unordered.
1782 if (control != NULL) {
1783 if (ordered || asoc->idata_supported) {
1784 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1786 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1789 if ((tsn == control->fsn_included + 1) &&
1790 (control->end_added == 0)) {
1791 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
/*
 * Phase 6: resource limits.  If queue counts or rwnd are exhausted,
 * nudge the reader awake; drop the chunk unless it is already inside
 * the accepted mapping-array window.
 */
1799 /* now do the tests */
1800 if (((asoc->cnt_on_all_streams +
1801 asoc->cnt_on_reasm_queue +
1802 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1803 (((int)asoc->my_rwnd) <= 0)) {
1805 * When we have NO room in the rwnd we check to make sure
1806 * the reader is doing its job...
1808 if (stcb->sctp_socket->so_rcv.sb_cc) {
1809 /* some to read, wake-up */
1810 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1813 so = SCTP_INP_SO(stcb->sctp_ep);
1814 atomic_add_int(&stcb->asoc.refcnt, 1);
1815 SCTP_TCB_UNLOCK(stcb);
1816 SCTP_SOCKET_LOCK(so, 1);
1817 SCTP_TCB_LOCK(stcb);
1818 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1819 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1820 /* assoc was freed while we were unlocked */
1821 SCTP_SOCKET_UNLOCK(so, 1);
1825 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1826 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1827 SCTP_SOCKET_UNLOCK(so, 1);
1830 /* now is it in the mapping array of what we have accepted? */
1832 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1833 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1834 /* Nope not in the valid range dump it */
1836 sctp_set_rwnd(stcb, asoc);
1837 if ((asoc->cnt_on_all_streams +
1838 asoc->cnt_on_reasm_queue +
1839 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1840 SCTP_STAT_INCR(sctps_datadropchklmt);
1842 SCTP_STAT_INCR(sctps_datadroprwnd);
1848 if (control == NULL) {
1851 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
/* Optional per-association inbound TSN ring log (debug build). */
1856 #ifdef SCTP_ASOCLOG_OF_TSNS
1857 SCTP_TCB_LOCK_ASSERT(stcb);
1858 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1859 asoc->tsn_in_at = 0;
1860 asoc->tsn_in_wrapped = 1;
1862 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1863 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1864 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1865 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1866 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1867 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1868 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1869 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1873 * Before we continue lets validate that we are not being fooled by
1874 * an evil attacker. We can only have Nk chunks based on our TSN
1875 * spread allowed by the mapping array N * 8 bits, so there is no
1876 * way our stream sequence numbers could have wrapped. We of course
1877 * only validate the FIRST fragment so the bit must be set.
1879 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1880 (TAILQ_EMPTY(&asoc->resetHead)) &&
1881 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1882 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1883 /* The incoming sseq is behind where we last delivered? */
1884 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1885 mid, asoc->strmin[sid].last_mid_delivered);
1887 if (asoc->idata_supported) {
1888 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1889 asoc->strmin[sid].last_mid_delivered,
1894 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1895 (uint16_t) asoc->strmin[sid].last_mid_delivered,
1900 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1901 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1902 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1906 /************************************
1907 * From here down we may find ch-> invalid
1908 * so its a good idea NOT to use it.
1909 *************************************/
/*
 * Phase 7: detach the payload.  Copy the payload mbufs out unless this
 * is the last chunk in the packet, in which case the packet mbuf chain
 * can be stolen and trimmed in place with m_adj().
 */
1911 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1913 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1915 if (last_chunk == 0) {
1917 dmbuf = SCTP_M_COPYM(*m,
1918 (offset + sizeof(struct sctp_idata_chunk)),
1921 dmbuf = SCTP_M_COPYM(*m,
1922 (offset + sizeof(struct sctp_data_chunk)),
1925 #ifdef SCTP_MBUF_LOGGING
1926 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1927 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1931 /* We can steal the last chunk */
1935 /* lop off the top part */
1937 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1939 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1941 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1942 l_len = SCTP_BUF_LEN(dmbuf);
1945 * need to count up the size hopefully does not hit
1951 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1952 l_len += SCTP_BUF_LEN(lat);
1955 if (l_len > the_len) {
1956 /* Trim the end round bytes off too */
1957 m_adj(dmbuf, -(l_len - the_len));
1960 if (dmbuf == NULL) {
1961 SCTP_STAT_INCR(sctps_nomem);
1965 * Now no matter what we need a control, get one if we don't have
1966 * one (we may have gotten it above when we found the message was
1969 if (control == NULL) {
1970 sctp_alloc_a_readq(stcb, control);
1971 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1976 if (control == NULL) {
1977 SCTP_STAT_INCR(sctps_nomem);
/* Unfragmented: the control is born complete with all flags set. */
1980 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1981 control->data = dmbuf;
1982 control->tail_mbuf = NULL;
1983 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1984 control->top_fsn = control->fsn_included = fsn;
1986 created_control = 1;
1988 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
1989 chunk_flags, ordered, mid, control);
/*
 * Phase 8: express delivery.  A complete message that is unordered,
 * or ordered and next-in-line with an empty stream queue, bypasses
 * the ordering machinery and goes straight to the socket buffer.
 */
1990 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1991 TAILQ_EMPTY(&asoc->resetHead) &&
1993 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
1994 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
1995 /* Candidate for express delivery */
1997 * Its not fragmented, No PD-API is up, Nothing in the
1998 * delivery queue, Its un-ordered OR ordered and the next to
1999 * deliver AND nothing else is stuck on the stream queue,
2000 * And there is room for it in the socket buffer. Lets just
2001 * stuff it up the buffer....
2003 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2004 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2005 asoc->highest_tsn_inside_nr_map = tsn;
2007 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2010 sctp_add_to_readq(stcb->sctp_ep, stcb,
2011 control, &stcb->sctp_socket->so_rcv,
2012 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2014 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2015 /* for ordered, bump what we delivered */
2016 strm->last_mid_delivered++;
2018 SCTP_STAT_INCR(sctps_recvexpress);
2019 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2020 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2021 SCTP_STR_LOG_FROM_EXPRS_DEL);
2024 goto finish_express_del;
/* Phase 9: fragmented messages need a tmit_chunk to carry the data. */
2026 /* Now will we need a chunk too? */
2027 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2028 sctp_alloc_a_chunk(stcb, chk);
2030 /* No memory so we drop the chunk */
2031 SCTP_STAT_INCR(sctps_nomem);
2032 if (last_chunk == 0) {
2033 /* we copied it, free the copy */
2034 sctp_m_freem(dmbuf);
2038 chk->rec.data.tsn = tsn;
2039 chk->no_fr_allowed = 0;
2040 chk->rec.data.fsn = fsn;
2041 chk->rec.data.mid = mid;
2042 chk->rec.data.sid = sid;
2043 chk->rec.data.ppid = ppid;
2044 chk->rec.data.context = stcb->asoc.context;
2045 chk->rec.data.doing_fast_retransmit = 0;
2046 chk->rec.data.rcv_flags = chunk_flags;
2048 chk->send_size = the_len;
2050 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2053 atomic_add_int(&net->ref_count, 1);
/*
 * Mark the TSN received: renegable TSNs go in mapping_array, the rest
 * in nr_mapping_array (sctp_do_drain == 0 disables renegging).
 */
2056 /* Set the appropriate TSN mark */
2057 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2058 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2059 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2060 asoc->highest_tsn_inside_nr_map = tsn;
2063 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2064 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2065 asoc->highest_tsn_inside_map = tsn;
/*
 * Phase 10: complete (unfragmented) messages.  During a pending
 * stream reset, messages past the reset TSN are parked TSN-sorted on
 * pending_reply_queue; otherwise they are queued unordered to the
 * reader or ordered to the stream.
 */
2068 /* Now is it complete (i.e. not fragmented)? */
2069 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2071 * Special check for when streams are resetting. We could be
2072 * more smart about this and check the actual stream to see
2073 * if it is not being reset.. that way we would not create a
2074 * HOLB when amongst streams being reset and those not being
2078 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2079 SCTP_TSN_GT(tsn, liste->tsn)) {
2081 * yep its past where we need to reset... go ahead
2084 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2086 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2088 struct sctp_queued_to_read *ctlOn, *nctlOn;
2089 unsigned char inserted = 0;
2091 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2092 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2097 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2102 if (inserted == 0) {
2104 * must be put at end, use prevP
2105 * (all setup from loop) to setup
2108 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2111 goto finish_express_del;
2113 if (chunk_flags & SCTP_DATA_UNORDERED) {
2114 /* queue directly into socket buffer */
2115 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2117 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2118 sctp_add_to_readq(stcb->sctp_ep, stcb,
2120 &stcb->sctp_socket->so_rcv, 1,
2121 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2124 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2126 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2134 goto finish_express_del;
/* Phase 11: a fragment -- hand it to the reassembly machinery. */
2136 /* If we reach here its a reassembly */
2137 need_reasm_check = 1;
2138 SCTPDBG(SCTP_DEBUG_XXX,
2139 "Queue data to stream for reasm control: %p MID: %u\n",
2141 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2144 * the assoc is now gone and chk was put onto the reasm
2145 * queue, which has all been freed.
/*
 * Phase 12 (finish_express_del): advance the cum-ack when possible,
 * bump statistics, and — once the cum-ack catches up to a pending
 * stream-reset TSN — perform the deferred reset and re-queue the
 * parked controls to their streams.
 */
2153 /* Here we tidy up things */
2154 if (tsn == (asoc->cumulative_tsn + 1)) {
2155 /* Update cum-ack */
2156 asoc->cumulative_tsn = tsn;
2162 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2164 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2166 SCTP_STAT_INCR(sctps_recvdata);
2167 /* Set it present please */
2168 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2169 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2171 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2172 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2173 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2175 /* check the special flag for stream resets */
2176 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2177 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2179 * we have finished working through the backlogged TSN's now
2180 * time to reset streams. 1: call reset function. 2: free
2181 * pending_reply space 3: distribute any chunks in
2182 * pending_reply_queue.
2184 struct sctp_queued_to_read *ctl, *nctl;
2186 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2187 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2188 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2189 SCTP_FREE(liste, SCTP_M_STRESET);
2190 /* sa_ignore FREED_MEMORY */
2191 liste = TAILQ_FIRST(&asoc->resetHead);
2192 if (TAILQ_EMPTY(&asoc->resetHead)) {
2193 /* All can be removed */
2194 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2195 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2196 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
/* Another reset is still pending: release only controls at or
 * below its TSN; later ones stay parked. */
2202 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2203 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2207 * if ctl->sinfo_tsn is <= liste->tsn we can
2208 * process it which is the NOT of
2209 * ctl->sinfo_tsn > liste->tsn
2211 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2212 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2219 * Now service re-assembly to pick up anything that has been
2220 * held on reassembly queue?
2222 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2223 need_reasm_check = 0;
2225 if (need_reasm_check) {
2226 /* Another one waits ? */
2227 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits in byte
 * 'v' counting up from bit 0 (e.g. tab[0x01] == 1, tab[0x03] == 2,
 * tab[0x02] == 0, tab[0xff] == 8).  sctp_slide_mapping_arrays() adds
 * this to its running bit position to find how far the cumulative TSN
 * advances within a partially-filled mapping-array byte.
 */
2232 static const int8_t sctp_map_lookup_tab[256] = {
2233 0, 1, 0, 2, 0, 1, 0, 3,
2234 0, 1, 0, 2, 0, 1, 0, 4,
2235 0, 1, 0, 2, 0, 1, 0, 3,
2236 0, 1, 0, 2, 0, 1, 0, 5,
2237 0, 1, 0, 2, 0, 1, 0, 3,
2238 0, 1, 0, 2, 0, 1, 0, 4,
2239 0, 1, 0, 2, 0, 1, 0, 3,
2240 0, 1, 0, 2, 0, 1, 0, 6,
2241 0, 1, 0, 2, 0, 1, 0, 3,
2242 0, 1, 0, 2, 0, 1, 0, 4,
2243 0, 1, 0, 2, 0, 1, 0, 3,
2244 0, 1, 0, 2, 0, 1, 0, 5,
2245 0, 1, 0, 2, 0, 1, 0, 3,
2246 0, 1, 0, 2, 0, 1, 0, 4,
2247 0, 1, 0, 2, 0, 1, 0, 3,
2248 0, 1, 0, 2, 0, 1, 0, 7,
2249 0, 1, 0, 2, 0, 1, 0, 3,
2250 0, 1, 0, 2, 0, 1, 0, 4,
2251 0, 1, 0, 2, 0, 1, 0, 3,
2252 0, 1, 0, 2, 0, 1, 0, 5,
2253 0, 1, 0, 2, 0, 1, 0, 3,
2254 0, 1, 0, 2, 0, 1, 0, 4,
2255 0, 1, 0, 2, 0, 1, 0, 3,
2256 0, 1, 0, 2, 0, 1, 0, 6,
2257 0, 1, 0, 2, 0, 1, 0, 3,
2258 0, 1, 0, 2, 0, 1, 0, 4,
2259 0, 1, 0, 2, 0, 1, 0, 3,
2260 0, 1, 0, 2, 0, 1, 0, 5,
2261 0, 1, 0, 2, 0, 1, 0, 3,
2262 0, 1, 0, 2, 0, 1, 0, 4,
2263 0, 1, 0, 2, 0, 1, 0, 3,
2264 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Advance the cumulative TSN and, when possible, slide the
 * mapping_array / nr_mapping_array down so the base TSN moves forward.
 * NOTE(review): this is a partial extraction of the function — the
 * declarations of 'at', 'val', 'clr', 'i', 'ii', the 'asoc' assignment,
 * several braces/else arms and the #ifdef INVARIANTS guards around the
 * panic()/SCTP_PRINTF() pairs are on elided lines.
 */
2269 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2272 * Now we also need to check the mapping array in a couple of ways.
2273 * 1) Did we move the cum-ack point?
2275 * When you first glance at this you might think that all entries
2276 * that make up the position of the cum-ack would be in the
2277 * nr-mapping array only.. i.e. things up to the cum-ack are always
2278 * deliverable. Thats true with one exception, when its a fragmented
2279 * message we may not deliver the data until some threshold (or all
2280 * of it) is in place. So we must OR the nr_mapping_array and
2281 * mapping_array to get a true picture of the cum-ack.
2283 struct sctp_association *asoc;
2286 int slide_from, slide_end, lgap, distance;
2287 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state; used only for SCTP_MAP_LOGGING below. */
2291 old_cumack = asoc->cumulative_tsn;
2292 old_base = asoc->mapping_array_base_tsn;
2293 old_highest = asoc->highest_tsn_inside_map;
2295 * We could probably improve this a small bit by calculating the
2296 * offset of the current cum-ack as the starting point.
/*
 * Scan the OR of both maps byte-by-byte; sctp_map_lookup_tab finds the
 * first 0 bit (first missing TSN) inside the first non-0xff byte.
 */
2299 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2300 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2304 /* there is a 0 bit */
2305 at += sctp_map_lookup_tab[val];
2309 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: the new cum-ack must not pass both highest-TSN trackers. */
2311 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2312 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2314 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2315 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2317 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2318 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2319 sctp_print_mapping_array(asoc);
2320 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2321 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery: clamp both trackers to the computed cum-ack. */
2323 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2324 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2327 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2328 highest_tsn = asoc->highest_tsn_inside_nr_map;
2330 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything up to the highest TSN is acked and we consumed at
 * least one whole byte — the maps can simply be wiped and re-based.
 */
2332 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2333 /* The complete array was completed by a single FR */
2334 /* highest becomes the cum-ack */
2340 /* clear the array */
2341 clr = ((at + 7) >> 3);
2342 if (clr > asoc->mapping_array_size) {
2343 clr = asoc->mapping_array_size;
2345 memset(asoc->mapping_array, 0, clr);
2346 memset(asoc->nr_mapping_array, 0, clr);
/* Debug pass: both arrays are expected to be all-zero after the wipe. */
2348 for (i = 0; i < asoc->mapping_array_size; i++) {
2349 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2350 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2351 sctp_print_mapping_array(asoc);
2355 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2356 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/* Case 2: at least one leading byte is fully acked — slide the maps. */
2357 } else if (at >= 8) {
2358 /* we can slide the mapping array down */
2359 /* slide_from holds where we hit the first NON 0xff byte */
2362 * now calculate the ceiling of the move using our highest
2365 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2366 slide_end = (lgap >> 3);
2367 if (slide_end < slide_from) {
2368 sctp_print_mapping_array(asoc);
2370 panic("impossible slide");
2372 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2373 lgap, slide_end, slide_from, at);
2377 if (slide_end > asoc->mapping_array_size) {
2379 panic("would overrun buffer");
2381 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2382 asoc->mapping_array_size, slide_end);
2383 slide_end = asoc->mapping_array_size;
2386 distance = (slide_end - slide_from) + 1;
2387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 sctp_log_map(old_base, old_cumack, old_highest,
2389 SCTP_MAP_PREPARE_SLIDE);
2390 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2391 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/* If the move would not fit, defer sliding to a later call. */
2393 if (distance + slide_from > asoc->mapping_array_size ||
2396 * Here we do NOT slide forward the array so that
2397 * hopefully when more data comes in to fill it up
2398 * we will be able to slide it forward. Really I
2399 * don't think this should happen :-0
2402 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2403 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2404 (uint32_t) asoc->mapping_array_size,
2405 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down, then zero the vacated tail. */
2410 for (ii = 0; ii < distance; ii++) {
2411 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2412 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2415 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2416 asoc->mapping_array[ii] = 0;
2417 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the highest-TSN trackers one below the new base when they were
 * one below the old base (i.e. "nothing beyond the base yet").
 */
2419 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2420 asoc->highest_tsn_inside_map += (slide_from << 3);
2422 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2423 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2425 asoc->mapping_array_base_tsn += (slide_from << 3);
2426 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2427 sctp_log_map(asoc->mapping_array_base_tsn,
2428 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2429 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to emit a SACK now or (re)start the delayed-ack
 * timer, based on gaps, duplicates, the send_sack flag, delayed-ack
 * settings and the CMT DAC algorithm.  In SHUTDOWN-SENT state a
 * SHUTDOWN + immediate SACK is sent instead.
 * NOTE(review): partial extraction — 'asoc = &stcb->asoc;', the
 * 'int is_a_gap;' declaration, an early return and several
 * braces/else lines are elided here.
 */
2436 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2438 struct sctp_association *asoc;
2439 uint32_t highest_tsn;
/* Highest TSN seen is the max of the renegable and non-renegable maps. */
2442 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2443 highest_tsn = asoc->highest_tsn_inside_nr_map;
2445 highest_tsn = asoc->highest_tsn_inside_map;
2449 * Now we need to see if we need to queue a sack or just start the
2450 * timer (if allowed).
2452 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2454 * Ok special case, in SHUTDOWN-SENT case. here we maker
2455 * sure SACK timer is off and instead send a SHUTDOWN and a
2458 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2459 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2460 stcb->sctp_ep, stcb, NULL,
2461 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* SHUTDOWN goes to the alternate path if one is set, else the primary. */
2463 sctp_send_shutdown(stcb,
2464 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2465 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2469 /* is there a gap now ? */
2470 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2473 * CMT DAC algorithm: increase number of packets received
2476 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any of these conditions forces an immediate SACK (or DAC-delayed ack). */
2478 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2480 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2482 (stcb->asoc.numduptsns) || /* we have dup's */
2483 (is_a_gap) || /* is still a gap */
2484 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2485 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ ) {
2487 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2488 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2489 (stcb->asoc.send_sack == 0) &&
2490 (stcb->asoc.numduptsns == 0) &&
2491 (stcb->asoc.delayed_ack) &&
2492 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2495 * CMT DAC algorithm: With CMT, delay acks
2496 * even in the face of
2498 * reordering. Therefore, if acks that do
2499 * not have to be sent because of the above
2500 * reasons, will be delayed. That is, acks
2501 * that would have been sent due to gap
2502 * reports will be delayed with DAC. Start
2503 * the delayed ack timer.
2505 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2506 stcb->sctp_ep, stcb, NULL);
2509 * Ok we must build a SACK since the timer
2510 * is pending, we got our first packet OR
2511 * there are gaps or duplicates.
2513 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2514 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Nothing urgent: just make sure the delayed-ack timer is running. */
2517 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2518 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2519 stcb->sctp_ep, stcb, NULL);
/*
 * Walk the DATA/I-DATA region of an inbound packet, dispatching each
 * chunk to sctp_process_a_data_chunk(), aborting the association on
 * protocol violations (DATA vs I-DATA mismatch, runt chunks, control
 * chunks after data), and finally triggering SACK generation.
 * NOTE(review): partial extraction — the 'm'/'stop_proc' declarations,
 * several early returns (abort_flag paths), break statements, braces
 * and #ifdef/#endif lines are elided here.
 */
2526 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2527 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2528 struct sctp_nets *net, uint32_t * high_tsn)
2530 struct sctp_chunkhdr *ch, chunk_buf;
2531 struct sctp_association *asoc;
2532 int num_chunks = 0; /* number of control chunks processed */
2534 int chk_length, break_flag, last_chunk;
2535 int abort_flag = 0, was_a_gap;
2537 uint32_t highest_tsn;
2540 sctp_set_rwnd(stcb, &stcb->asoc);
2543 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap existed before this packet, for sctp_sack_check(). */
2545 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2546 highest_tsn = asoc->highest_tsn_inside_nr_map;
2548 highest_tsn = asoc->highest_tsn_inside_map;
2550 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2552 * setup where we got the last DATA packet from for any SACK that
2553 * may need to go out. Don't bump the net. This is done ONLY when a
2554 * chunk is assigned.
2556 asoc->last_data_chunk_from = net;
2559 * Now before we proceed we must figure out if this is a wasted
2560 * cluster... i.e. it is a small packet sent in and yet the driver
2561 * underneath allocated a full cluster for it. If so we must copy it
2562 * to a smaller mbuf and free up the cluster mbuf. This will help
2563 * with cluster starvation. Note for __Panda__ we don't do this
2564 * since it has clusters all the way down to 64 bytes.
2566 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2567 /* we only handle mbufs that are singletons.. not chains */
2568 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2570 /* ok lets see if we can copy the data up */
2573 /* get the pointers and copy */
2574 to = mtod(m, caddr_t *);
2575 from = mtod((*mm), caddr_t *);
2576 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2577 /* copy the length and free up the old */
2578 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2580 /* success, back copy */
2583 /* We are in trouble in the mbuf world .. yikes */
2587 /* get pointer to the first chunk header */
2588 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2589 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2594 * process all DATA chunks...
2596 *high_tsn = asoc->cumulative_tsn;
2598 asoc->data_pkts_seen++;
/* Main chunk-walk loop; stop_proc is set on mutilated or terminal chunks. */
2599 while (stop_proc == 0) {
2600 /* validate chunk length */
2601 chk_length = ntohs(ch->chunk_length);
2602 if (length - *offset < chk_length) {
2603 /* all done, mutulated chunk */
/* DATA and I-DATA are mutually exclusive per the negotiated extension. */
2607 if ((asoc->idata_supported == 1) &&
2608 (ch->chunk_type == SCTP_DATA)) {
2609 struct mbuf *op_err;
2610 char msg[SCTP_DIAG_INFO_LEN];
2612 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2613 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2614 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2615 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2618 if ((asoc->idata_supported == 0) &&
2619 (ch->chunk_type == SCTP_IDATA)) {
2620 struct mbuf *op_err;
2621 char msg[SCTP_DIAG_INFO_LEN];
2623 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2624 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2625 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2626 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2629 if ((ch->chunk_type == SCTP_DATA) ||
2630 (ch->chunk_type == SCTP_IDATA)) {
2633 if (ch->chunk_type == SCTP_DATA) {
2634 clen = sizeof(struct sctp_data_chunk);
2636 clen = sizeof(struct sctp_idata_chunk);
2638 if (chk_length < clen) {
2640 * Need to send an abort since we had a
2641 * invalid data chunk.
2643 struct mbuf *op_err;
2644 char msg[SCTP_DIAG_INFO_LEN];
2646 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2648 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2649 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2650 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2653 #ifdef SCTP_AUDITING_ENABLED
2654 sctp_audit_log(0xB1, 0);
/* last_chunk: this chunk's padded length exactly fills the packet. */
2656 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2661 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2662 chk_length, net, high_tsn, &abort_flag, &break_flag,
2663 last_chunk, ch->chunk_type)) {
2671 * Set because of out of rwnd space and no
2672 * drop rep space left.
2678 /* not a data chunk in the data region */
2679 switch (ch->chunk_type) {
2680 case SCTP_INITIATION:
2681 case SCTP_INITIATION_ACK:
2682 case SCTP_SELECTIVE_ACK:
2683 case SCTP_NR_SELECTIVE_ACK:
2684 case SCTP_HEARTBEAT_REQUEST:
2685 case SCTP_HEARTBEAT_ACK:
2686 case SCTP_ABORT_ASSOCIATION:
2688 case SCTP_SHUTDOWN_ACK:
2689 case SCTP_OPERATION_ERROR:
2690 case SCTP_COOKIE_ECHO:
2691 case SCTP_COOKIE_ACK:
2694 case SCTP_SHUTDOWN_COMPLETE:
2695 case SCTP_AUTHENTICATION:
2696 case SCTP_ASCONF_ACK:
2697 case SCTP_PACKET_DROPPED:
2698 case SCTP_STREAM_RESET:
2699 case SCTP_FORWARD_CUM_TSN:
2703 * Now, what do we do with KNOWN
2704 * chunks that are NOT in the right
2707 * For now, I do nothing but ignore
2708 * them. We may later want to add
2709 * sysctl stuff to switch out and do
2710 * either an ABORT() or possibly
2713 struct mbuf *op_err;
2714 char msg[SCTP_DIAG_INFO_LEN];
2716 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2718 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2719 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2723 /* unknown chunk type, use bit rules */
2724 if (ch->chunk_type & 0x40) {
2725 /* Add a error report to the queue */
2726 struct mbuf *op_err;
2727 struct sctp_gen_error_cause *cause;
2729 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2730 0, M_NOWAIT, 1, MT_DATA);
2731 if (op_err != NULL) {
2732 cause = mtod(op_err, struct sctp_gen_error_cause *);
2733 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2734 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2735 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2736 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2737 if (SCTP_BUF_NEXT(op_err) != NULL) {
2738 sctp_queue_op_err(stcb, op_err);
2740 sctp_m_freem(op_err);
/* High bit clear -> stop processing the rest of the packet. */
2744 if ((ch->chunk_type & 0x80) == 0) {
2745 /* discard the rest of this packet */
2747 } /* else skip this bad chunk and
2748 * continue... */ break;
2749 } /* switch of chunk type */
2751 *offset += SCTP_SIZE32(chk_length);
2752 if ((*offset >= length) || stop_proc) {
2753 /* no more data left in the mbuf chain */
2757 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2758 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2767 * we need to report rwnd overrun drops.
2769 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2773 * Did we get data, if so update the time for auto-close and
2774 * give peer credit for being alive.
2776 SCTP_STAT_INCR(sctps_recvpktwithdata);
2777 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2778 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2779 stcb->asoc.overall_error_count,
2781 SCTP_FROM_SCTP_INDATA,
/* Fresh data from the peer resets the association error counter. */
2784 stcb->asoc.overall_error_count = 0;
2785 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2787 /* now service all of the reassm queue if needed */
2788 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2789 /* Assure that we ack right away */
2790 stcb->asoc.send_sack = 1;
2792 /* Start a sack timer or QUEUE a SACK for sending */
2793 sctp_sack_check(stcb, was_a_gap);
/*
 * Mark the sent-queue chunks covered by one SACK gap-ack block
 * [last_tsn+frag_strt, last_tsn+frag_end] as acked (or NR-acked when
 * nr_sacking), updating flight size, RTO samples, CMT pseudo-cumack
 * trackers and the newly-acked bookkeeping.  *p_tp1 carries the scan
 * position between successive blocks.  Returns nonzero if a sleeper
 * should be woken (nr-sack freed data).
 * NOTE(review): partial extraction — the 'while (tp1)' loop header,
 * several braces/else arms and the #ifdef INVARIANTS guards are on
 * elided lines.
 */
2798 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2799 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2801 uint32_t * biggest_newly_acked_tsn,
2802 uint32_t * this_sack_lowest_newack,
2805 struct sctp_tmit_chunk *tp1;
2806 unsigned int theTSN;
2807 int j, wake_him = 0, circled = 0;
2809 /* Recover the tp1 we last saw */
2812 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Walk every TSN named by this fragment range. */
2814 for (j = frag_strt; j <= frag_end; j++) {
2815 theTSN = j + last_tsn;
2817 if (tp1->rec.data.doing_fast_retransmit)
2821 * CMT: CUCv2 algorithm. For each TSN being
2822 * processed from the sent queue, track the
2823 * next expected pseudo-cumack, or
2824 * rtx_pseudo_cumack, if required. Separate
2825 * cumack trackers for first transmissions,
2826 * and retransmissions.
2828 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2829 (tp1->whoTo->find_pseudo_cumack == 1) &&
2830 (tp1->snd_count == 1)) {
2831 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2832 tp1->whoTo->find_pseudo_cumack = 0;
2834 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2835 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2836 (tp1->snd_count > 1)) {
2837 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2838 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2840 if (tp1->rec.data.tsn == theTSN) {
2841 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2843 * must be held until
2846 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2848 * If it is less than RESEND, it is
2849 * now no-longer in flight.
2850 * Higher values may already be set
2851 * via previous Gap Ack Blocks...
2852 * i.e. ACKED or RESEND.
2854 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2855 *biggest_newly_acked_tsn)) {
2856 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
2859 * CMT: SFR algo (and HTNA) - set
2860 * saw_newack to 1 for dest being
2861 * newly acked. update
2862 * this_sack_highest_newack if
2865 if (tp1->rec.data.chunk_was_revoked == 0)
2866 tp1->whoTo->saw_newack = 1;
2868 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2869 tp1->whoTo->this_sack_highest_newack)) {
2870 tp1->whoTo->this_sack_highest_newack =
2874 * CMT DAC algo: also update
2875 * this_sack_lowest_newack
2877 if (*this_sack_lowest_newack == 0) {
2878 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2879 sctp_log_sack(*this_sack_lowest_newack,
2884 SCTP_LOG_TSN_ACKED);
2886 *this_sack_lowest_newack = tp1->rec.data.tsn;
2889 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2890 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2891 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2892 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2893 * Separate pseudo_cumack trackers for first transmissions and
2896 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
2897 if (tp1->rec.data.chunk_was_revoked == 0) {
2898 tp1->whoTo->new_pseudo_cumack = 1;
2900 tp1->whoTo->find_pseudo_cumack = 1;
2902 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2903 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
2905 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
2906 if (tp1->rec.data.chunk_was_revoked == 0) {
2907 tp1->whoTo->new_pseudo_cumack = 1;
2909 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2911 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2912 sctp_log_sack(*biggest_newly_acked_tsn,
2917 SCTP_LOG_TSN_ACKED);
2919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2920 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2921 tp1->whoTo->flight_size,
2923 (uint32_t) (uintptr_t) tp1->whoTo,
/* Newly acked: remove from flight and credit net_ack for cwnd growth. */
2926 sctp_flight_size_decrease(tp1);
2927 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2928 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2931 sctp_total_flight_decrease(stcb, tp1);
2933 tp1->whoTo->net_ack += tp1->send_size;
2934 if (tp1->snd_count < 2) {
2936 * True non-retransmited chunk
2938 tp1->whoTo->net_ack2 += tp1->send_size;
/* First transmission acked: take an RTT sample for this destination. */
2946 sctp_calculate_rto(stcb,
2949 &tp1->sent_rcv_time,
2950 sctp_align_safe_nocopy,
2951 SCTP_RTT_FROM_DATA);
2954 if (tp1->whoTo->rto_needed == 0) {
2955 tp1->whoTo->rto_needed = 1;
2961 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2962 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2963 stcb->asoc.this_sack_highest_gap)) {
2964 stcb->asoc.this_sack_highest_gap =
2967 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2968 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2969 #ifdef SCTP_AUDITING_ENABLED
2970 sctp_audit_log(0xB2,
2971 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2976 * All chunks NOT UNSENT fall through here and are marked
2977 * (leave PR-SCTP ones that are to skip alone though)
2979 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2980 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2981 tp1->sent = SCTP_DATAGRAM_MARKED;
2983 if (tp1->rec.data.chunk_was_revoked) {
2984 /* deflate the cwnd */
2985 tp1->whoTo->cwnd -= tp1->book_size;
2986 tp1->rec.data.chunk_was_revoked = 0;
2988 /* NR Sack code here */
/* nr-sack: the ack is non-renegable, so the data can be freed now. */
2990 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2991 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
2992 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
2995 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
2998 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
2999 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3000 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3001 stcb->asoc.trigger_reset = 1;
3003 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3007 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3008 sctp_m_freem(tp1->data);
3015 } /* if (tp1->tsn == theTSN) */ if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
/* Passed the target TSN without a match — advance; wrap once if needed. */
3018 tp1 = TAILQ_NEXT(tp1, sctp_next);
3019 if ((tp1 == NULL) && (circled == 0)) {
3021 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3023 } /* end while (tp1) */
3026 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3028 /* In case the fragments were not in order we must reset */
3029 } /* end for (j = fragStart */
3031 return (wake_him); /* Return value only used for nr-sack */
/*
 * Parse the num_seg gap-ack blocks followed by num_nr_seg nr-gap-ack
 * blocks out of the SACK chunk at *offset in mbuf m, validating each
 * block and handing it to sctp_process_segment_range().  Returns
 * nonzero when an nr-sack block caused chunk data to be freed.
 * NOTE(review): partial extraction — the 'chunk_freed'/'num_frs'/
 * 'non_revocable'/'i' declarations, the prev_frag_end initialization
 * and several braces are on elided lines.
 */
3036 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3037 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3038 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3039 int num_seg, int num_nr_seg, int *rto_ok)
3041 struct sctp_gap_ack_block *frag, block;
3042 struct sctp_tmit_chunk *tp1;
3047 uint16_t frag_strt, frag_end, prev_frag_end;
3049 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Regular gap-ack blocks first, then the nr (non-renegable) blocks. */
3053 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3056 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3058 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3059 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3060 *offset += sizeof(block);
/* Truncated chunk: stop parsing and report what was freed so far. */
3062 return (chunk_freed);
3064 frag_strt = ntohs(frag->start);
3065 frag_end = ntohs(frag->end);
3067 if (frag_strt > frag_end) {
3068 /* This gap report is malformed, skip it. */
3071 if (frag_strt <= prev_frag_end) {
3072 /* This gap report is not in order, so restart. */
3073 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3075 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3076 *biggest_tsn_acked = last_tsn + frag_end;
3083 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3084 non_revocable, &num_frs, biggest_newly_acked_tsn,
3085 this_sack_lowest_newack, rto_ok)) {
3088 prev_frag_end = frag_end;
3090 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3092 sctp_log_fr(*biggest_tsn_acked,
3093 *biggest_newly_acked_tsn,
3094 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3096 return (chunk_freed);
/*
 * Scan the sent queue for chunks above the cumulative ack that were
 * gap-acked by an earlier SACK but are no longer covered by this one
 * (renege/revocation): put them back to SENT state and re-inflate
 * flight size and cwnd; re-confirm MARKED chunks as ACKED.
 * NOTE(review): partial extraction — the trailing 'break' for UNSENT,
 * some braces/else lines and the function's closing brace are elided.
 */
3100 sctp_check_for_revoked(struct sctp_tcb *stcb,
3101 struct sctp_association *asoc, uint32_t cumack,
3102 uint32_t biggest_tsn_acked)
3104 struct sctp_tmit_chunk *tp1;
3106 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3107 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3109 * ok this guy is either ACK or MARKED. If it is
3110 * ACKED it has been previously acked but not this
3111 * time i.e. revoked. If it is MARKED it was ACK'ed
3114 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
/* Beyond this SACK's coverage — nothing further can be revoked. */
3117 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3118 /* it has been revoked */
3119 tp1->sent = SCTP_DATAGRAM_SENT;
3120 tp1->rec.data.chunk_was_revoked = 1;
3122 * We must add this stuff back in to assure
3123 * timers and such get started.
3125 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3126 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3127 tp1->whoTo->flight_size,
3129 (uint32_t) (uintptr_t) tp1->whoTo,
3132 sctp_flight_size_increase(tp1);
3133 sctp_total_flight_increase(stcb, tp1);
3135 * We inflate the cwnd to compensate for our
3136 * artificial inflation of the flight_size.
3138 tp1->whoTo->cwnd += tp1->book_size;
3139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3140 sctp_log_sack(asoc->last_acked_seq,
3145 SCTP_LOG_TSN_REVOKED);
3147 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3148 /* it has been re-acked in this SACK */
3149 tp1->sent = SCTP_DATAGRAM_ACKED;
3152 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3159 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3160 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3162 struct sctp_tmit_chunk *tp1;
3163 int strike_flag = 0;
3165 int tot_retrans = 0;
3166 uint32_t sending_seq;
3167 struct sctp_nets *net;
3168 int num_dests_sacked = 0;
3171 * select the sending_seq, this is either the next thing ready to be
3172 * sent but not transmitted, OR, the next seq we assign.
3174 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3176 sending_seq = asoc->sending_seq;
3178 sending_seq = tp1->rec.data.tsn;
3181 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3182 if ((asoc->sctp_cmt_on_off > 0) &&
3183 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3184 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3185 if (net->saw_newack)
3189 if (stcb->asoc.prsctp_supported) {
3190 (void)SCTP_GETTIME_TIMEVAL(&now);
3192 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3194 if (tp1->no_fr_allowed) {
3195 /* this one had a timeout or something */
3198 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3199 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3200 sctp_log_fr(biggest_tsn_newly_acked,
3203 SCTP_FR_LOG_CHECK_STRIKE);
3205 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3206 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3210 if (stcb->asoc.prsctp_supported) {
3211 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3212 /* Is it expired? */
3213 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3214 /* Yes so drop it */
3215 if (tp1->data != NULL) {
3216 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3217 SCTP_SO_NOT_LOCKED);
3223 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3224 /* we are beyond the tsn in the sack */
3227 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3228 /* either a RESEND, ACKED, or MARKED */
3230 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3231 /* Continue strikin FWD-TSN chunks */
3232 tp1->rec.data.fwd_tsn_cnt++;
3237 * CMT : SFR algo (covers part of DAC and HTNA as well)
3239 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3241 * No new acks were receieved for data sent to this
3242 * dest. Therefore, according to the SFR algo for
3243 * CMT, no data sent to this dest can be marked for
3244 * FR using this SACK.
3247 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3248 tp1->whoTo->this_sack_highest_newack)) {
3250 * CMT: New acks were receieved for data sent to
3251 * this dest. But no new acks were seen for data
3252 * sent after tp1. Therefore, according to the SFR
3253 * algo for CMT, tp1 cannot be marked for FR using
3254 * this SACK. This step covers part of the DAC algo
3255 * and the HTNA algo as well.
3260 * Here we check to see if we were have already done a FR
3261 * and if so we see if the biggest TSN we saw in the sack is
3262 * smaller than the recovery point. If so we don't strike
3263 * the tsn... otherwise we CAN strike the TSN.
3266 * @@@ JRI: Check for CMT if (accum_moved &&
3267 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3270 if (accum_moved && asoc->fast_retran_loss_recovery) {
3272 * Strike the TSN if in fast-recovery and cum-ack
3275 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3276 sctp_log_fr(biggest_tsn_newly_acked,
3279 SCTP_FR_LOG_STRIKE_CHUNK);
3281 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3284 if ((asoc->sctp_cmt_on_off > 0) &&
3285 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3287 * CMT DAC algorithm: If SACK flag is set to
3288 * 0, then lowest_newack test will not pass
3289 * because it would have been set to the
3290 * cumack earlier. If not already to be
3291 * rtx'd, If not a mixed sack and if tp1 is
3292 * not between two sacked TSNs, then mark by
3293 * one more. NOTE that we are marking by one
3294 * additional time since the SACK DAC flag
3295 * indicates that two packets have been
3296 * received after this missing TSN.
3298 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3299 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3301 sctp_log_fr(16 + num_dests_sacked,
3304 SCTP_FR_LOG_STRIKE_CHUNK);
3309 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3310 (asoc->sctp_cmt_on_off == 0)) {
3312 * For those that have done a FR we must take
3313 * special consideration if we strike. I.e the
3314 * biggest_newly_acked must be higher than the
3315 * sending_seq at the time we did the FR.
3318 #ifdef SCTP_FR_TO_ALTERNATE
3320 * If FR's go to new networks, then we must only do
3321 * this for singly homed asoc's. However if the FR's
3322 * go to the same network (Armando's work) then its
3323 * ok to FR multiple times.
3331 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3332 tp1->rec.data.fast_retran_tsn)) {
3334 * Strike the TSN, since this ack is
3335 * beyond where things were when we
3338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3339 sctp_log_fr(biggest_tsn_newly_acked,
3342 SCTP_FR_LOG_STRIKE_CHUNK);
3344 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3348 if ((asoc->sctp_cmt_on_off > 0) &&
3349 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3351 * CMT DAC algorithm: If
3352 * SACK flag is set to 0,
3353 * then lowest_newack test
3354 * will not pass because it
3355 * would have been set to
3356 * the cumack earlier. If
3357 * not already to be rtx'd,
3358 * If not a mixed sack and
3359 * if tp1 is not between two
3360 * sacked TSNs, then mark by
3361 * one more. NOTE that we
3362 * are marking by one
3363 * additional time since the
3364 * SACK DAC flag indicates
3365 * that two packets have
3366 * been received after this
3369 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3370 (num_dests_sacked == 1) &&
3371 SCTP_TSN_GT(this_sack_lowest_newack,
3372 tp1->rec.data.tsn)) {
3373 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3374 sctp_log_fr(32 + num_dests_sacked,
3377 SCTP_FR_LOG_STRIKE_CHUNK);
3379 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3387 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3390 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3391 biggest_tsn_newly_acked)) {
3393 * We don't strike these: This is the HTNA
3394 * algorithm i.e. we don't strike If our TSN is
3395 * larger than the Highest TSN Newly Acked.
3399 /* Strike the TSN */
3400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3401 sctp_log_fr(biggest_tsn_newly_acked,
3404 SCTP_FR_LOG_STRIKE_CHUNK);
3406 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3409 if ((asoc->sctp_cmt_on_off > 0) &&
3410 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3412 * CMT DAC algorithm: If SACK flag is set to
3413 * 0, then lowest_newack test will not pass
3414 * because it would have been set to the
3415 * cumack earlier. If not already to be
3416 * rtx'd, If not a mixed sack and if tp1 is
3417 * not between two sacked TSNs, then mark by
3418 * one more. NOTE that we are marking by one
3419 * additional time since the SACK DAC flag
3420 * indicates that two packets have been
3421 * received after this missing TSN.
3423 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3424 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3425 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3426 sctp_log_fr(48 + num_dests_sacked,
3429 SCTP_FR_LOG_STRIKE_CHUNK);
3435 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3436 struct sctp_nets *alt;
3438 /* fix counts and things */
3439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3440 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3441 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3443 (uint32_t) (uintptr_t) tp1->whoTo,
3447 tp1->whoTo->net_ack++;
3448 sctp_flight_size_decrease(tp1);
3449 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3450 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3455 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3456 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3458 /* add back to the rwnd */
3459 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3461 /* remove from the total flight */
3462 sctp_total_flight_decrease(stcb, tp1);
3464 if ((stcb->asoc.prsctp_supported) &&
3465 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3466 /* Has it been retransmitted tv_sec times? -
3467 * we store the retran count there. */
3468 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3469 /* Yes, so drop it */
3470 if (tp1->data != NULL) {
3471 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3472 SCTP_SO_NOT_LOCKED);
3474 /* Make sure to flag we had a FR */
3475 tp1->whoTo->net_ack++;
3479 /* SCTP_PRINTF("OK, we are now ready to FR this
3481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3482 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3486 /* This is a subsequent FR */
3487 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3489 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3490 if (asoc->sctp_cmt_on_off > 0) {
3492 * CMT: Using RTX_SSTHRESH policy for CMT.
3493 * If CMT is being used, then pick dest with
3494 * largest ssthresh for any retransmission.
3496 tp1->no_fr_allowed = 1;
3498 /* sa_ignore NO_NULL_CHK */
3499 if (asoc->sctp_cmt_pf > 0) {
3500 /* JRS 5/18/07 - If CMT PF is on,
3501 * use the PF version of
3503 alt = sctp_find_alternate_net(stcb, alt, 2);
3505 /* JRS 5/18/07 - If only CMT is on,
3506 * use the CMT version of
3508 /* sa_ignore NO_NULL_CHK */
3509 alt = sctp_find_alternate_net(stcb, alt, 1);
3515 * CUCv2: If a different dest is picked for
3516 * the retransmission, then new
3517 * (rtx-)pseudo_cumack needs to be tracked
3518 * for orig dest. Let CUCv2 track new (rtx-)
3519 * pseudo-cumack always.
3522 tp1->whoTo->find_pseudo_cumack = 1;
3523 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3525 } else {/* CMT is OFF */
3527 #ifdef SCTP_FR_TO_ALTERNATE
3528 /* Can we find an alternate? */
3529 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3532 * default behavior is to NOT retransmit
3533 * FR's to an alternate. Armando Caro's
3534 * paper details why.
3540 tp1->rec.data.doing_fast_retransmit = 1;
3542 /* mark the sending seq for possible subsequent FR's */
3544 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3545 * (uint32_t)tpi->rec.data.tsn);
3547 if (TAILQ_EMPTY(&asoc->send_queue)) {
3549 * If the queue of send is empty then its
3550 * the next sequence number that will be
3551 * assigned so we subtract one from this to
3552 * get the one we last sent.
3554 tp1->rec.data.fast_retran_tsn = sending_seq;
3557 * If there are chunks on the send queue
3558 * (unsent data that has made it from the
3559 * stream queues but not out the door, we
3560 * take the first one (which will have the
3561 * lowest TSN) and subtract one to get the
3564 struct sctp_tmit_chunk *ttt;
3566 ttt = TAILQ_FIRST(&asoc->send_queue);
3567 tp1->rec.data.fast_retran_tsn =
3573 * this guy had a RTO calculation pending on
3576 if ((tp1->whoTo != NULL) &&
3577 (tp1->whoTo->rto_needed == 0)) {
3578 tp1->whoTo->rto_needed = 1;
3582 if (alt != tp1->whoTo) {
3583 /* yes, there is an alternate. */
3584 sctp_free_remote_addr(tp1->whoTo);
3585 /* sa_ignore FREED_MEMORY */
3587 atomic_add_int(&alt->ref_count, 1);
/*
 * sctp_try_advance_peer_ack_point():
 * Walk asoc->sent_queue in TSN order and advance
 * asoc->advanced_peer_ack_point over chunks the peer may skip via a
 * PR-SCTP FORWARD-TSN (chunks whose sent state is SCTP_FORWARD_TSN_SKIP
 * or SCTP_DATAGRAM_NR_ACKED).  Returns the last chunk the point was
 * advanced over (a_adv) so the caller can decide whether to emit a
 * FORWARD-TSN; returns NULL when PR-SCTP is not in use.
 *
 * NOTE(review): this listing has lines elided by extraction (gaps in
 * the embedded numbering mark missing statements, including the
 * function's tail).  Do not treat the visible text as complete.
 */
3593 struct sctp_tmit_chunk *
3594 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3595 struct sctp_association *asoc)
3597 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3601 if (asoc->prsctp_supported == 0) {
3604 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
/*
 * Any chunk not in a skip-able/resend state blocks further
 * advancement: the peer ack point can only move over a
 * contiguous prefix of the sent queue.
 */
3605 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3606 tp1->sent != SCTP_DATAGRAM_RESEND &&
3607 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3608 /* no chance to advance, out of here */
3611 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3612 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3613 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3614 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3615 asoc->advanced_peer_ack_point,
3616 tp1->rec.data.tsn, 0, 0);
3619 if (!PR_SCTP_ENABLED(tp1->flags)) {
3621 * We can't fwd-tsn past any that are reliable aka
3622 * retransmitted until the asoc fails.
/* Snapshot current time for the TTL expiry comparison below. */
3627 (void)SCTP_GETTIME_TIMEVAL(&now);
3631 * now we got a chunk which is marked for another
3632 * retransmission to a PR-stream but has run out its chances
3633 * already maybe OR has been marked to skip now. Can we skip
3634 * it if its a resend?
3636 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3637 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3639 * Now is this one marked for resend and its time is
3642 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3643 /* Yes so drop it */
3645 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3646 1, SCTP_SO_NOT_LOCKED);
3650 * No, we are done when hit one for resend
3651 * whos time as not expired.
3657 * Ok now if this chunk is marked to drop it we can clean up
3658 * the chunk, advance our peer ack point and we can check
3661 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3662 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3663 /* advance PeerAckPoint goes forward */
3664 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3665 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3667 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3668 /* No update but we do save the chk */
3673 * If it is still in RESEND we can advance no
/*
 * sctp_fs_audit():
 * Consistency audit of flight-size accounting: classify every chunk on
 * asoc->sent_queue by its sent state (in flight, marked for resend,
 * in-between, acked, above acked) and compare against the association's
 * cached total_flight/total_flight_count.  Under INVARIANTS the mismatch
 * panics; otherwise it is logged.  Callers use the result to decide
 * whether to rebuild the flight counters from scratch.
 *
 * NOTE(review): lines are elided from this listing (the return type,
 * counter increments, and the return paths are not visible); the
 * visible text is not the complete function.
 */
3683 sctp_fs_audit(struct sctp_association *asoc)
3685 struct sctp_tmit_chunk *chk;
3686 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3689 int entry_flight, entry_cnt;
/* Snapshot the cached totals before the walk for the log message. */
3694 entry_flight = asoc->total_flight;
3695 entry_cnt = asoc->total_flight_count;
/* All-PR-SCTP queues are exempt from the audit. */
3697 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3700 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3701 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3702 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3707 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3709 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3711 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in flight (or in-between) here is stale. */
3718 if ((inflight > 0) || (inbetween > 0)) {
3720 panic("Flight size-express incorrect? \n");
3722 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3723 entry_flight, entry_cnt);
3725 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3726 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery():
 * The peer's receive window re-opened after a zero-window probe.  Take
 * the probe chunk tp1 out of "in flight" accounting and mark it
 * SCTP_DATAGRAM_RESEND so it is retransmitted as normal data.  Chunks
 * already at/above ACKED, or whose data mbuf is gone, are only logged
 * and left alone.
 *
 * NOTE(review): lines are elided from this listing (log arguments and
 * the function tail are missing); the visible text is not complete.
 */
3735 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3736 struct sctp_association *asoc,
3737 struct sctp_tmit_chunk *tp1)
/* Clear the probe flag unconditionally; it has served its purpose. */
3739 tp1->window_probe = 0;
3740 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3741 /* TSN's skipped we do NOT move back. */
3742 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3743 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3745 (uint32_t) (uintptr_t) tp1->whoTo,
3749 /* First setup this by shrinking flight */
3750 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3751 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3754 sctp_flight_size_decrease(tp1);
3755 sctp_total_flight_decrease(stcb, tp1);
3756 /* Now mark for resend */
3757 tp1->sent = SCTP_DATAGRAM_RESEND;
3758 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3761 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3762 tp1->whoTo->flight_size,
3764 (uint32_t) (uintptr_t) tp1->whoTo,
/*
 * sctp_express_handle_sack():
 * Fast-path SACK processing for a SACK that carries only a cumulative
 * ack (no gap-ack blocks).  Releases newly cum-acked chunks from
 * sent_queue, updates RTO/congestion state via the pluggable CC module,
 * recomputes the peer rwnd, recovers from zero-window probes, handles
 * shutdown progression when the queues drain, and runs the PR-SCTP
 * advanced-peer-ack-point/FORWARD-TSN procedures.
 * Called with the TCB lock held (asserted below).  *abort_now is set
 * when a protocol violation forces an association abort.
 *
 * NOTE(review): many lines of this function are elided from the listing
 * (gaps in the embedded numbering, including the function's closing
 * lines).  Comments below describe only what the visible code shows.
 */
3770 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3771 uint32_t rwnd, int *abort_now, int ecne_seen)
3773 struct sctp_nets *net;
3774 struct sctp_association *asoc;
3775 struct sctp_tmit_chunk *tp1, *tp2;
3777 int win_probe_recovery = 0;
3778 int win_probe_recovered = 0;
3779 int j, done_once = 0;
3783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3784 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3785 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3787 SCTP_TCB_LOCK_ASSERT(stcb);
3788 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Optional ring-buffer trace of every cum-ack seen on this assoc. */
3789 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3790 stcb->asoc.cumack_log_at++;
3791 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3792 stcb->asoc.cumack_log_at = 0;
3796 old_rwnd = asoc->peers_rwnd;
/* Old (behind our last acked) SACKs are discarded; equal cum-ack is a
 * pure window update. */
3797 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3800 } else if (asoc->last_acked_seq == cumack) {
3801 /* Window update sack */
3802 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3803 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3804 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3805 /* SWS sender side engages */
3806 asoc->peers_rwnd = 0;
3808 if (asoc->peers_rwnd > old_rwnd) {
3813 /* First setup for CC stuff */
3814 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3815 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3816 /* Drag along the window_tsn for cwr's */
3817 net->cwr_window_tsn = cumack;
3819 net->prev_cwnd = net->cwnd;
3824 * CMT: Reset CUC and Fast recovery algo variables before
3827 net->new_pseudo_cumack = 0;
3828 net->will_exit_fast_recovery = 0;
3829 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3830 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity: the cum-ack may never reach a TSN we have not yet sent. */
3833 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3834 tp1 = TAILQ_LAST(&asoc->sent_queue,
3835 sctpchunk_listhead);
3836 send_s = tp1->rec.data.tsn + 1;
3838 send_s = asoc->sending_seq;
3840 if (SCTP_TSN_GE(cumack, send_s)) {
3841 struct mbuf *op_err;
3842 char msg[SCTP_DIAG_INFO_LEN];
3846 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3848 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3849 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3850 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3853 asoc->this_sack_highest_gap = cumack;
3854 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3855 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3856 stcb->asoc.overall_error_count,
3858 SCTP_FROM_SCTP_INDATA,
/* A valid SACK clears the association-wide error counter. */
3861 stcb->asoc.overall_error_count = 0;
3862 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3863 /* process the new consecutive TSN first */
3864 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3865 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3866 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3867 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3869 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3871 * If it is less than ACKED, it is
3872 * now no-longer in flight. Higher
3873 * values may occur during marking
3875 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3876 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3877 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3878 tp1->whoTo->flight_size,
3880 (uint32_t) (uintptr_t) tp1->whoTo,
3883 sctp_flight_size_decrease(tp1);
3884 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3885 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3888 /* sa_ignore NO_NULL_CHK */
3889 sctp_total_flight_decrease(stcb, tp1);
/* net_ack feeds the CC module; net_ack2 counts only first-transmission
 * bytes (unambiguous for RTT, per Karn's algorithm). */
3891 tp1->whoTo->net_ack += tp1->send_size;
3892 if (tp1->snd_count < 2) {
3894 * True non-retransmited
3897 tp1->whoTo->net_ack2 +=
3900 /* update RTO too? */
3908 sctp_calculate_rto(stcb,
3910 &tp1->sent_rcv_time,
3911 sctp_align_safe_nocopy,
3912 SCTP_RTT_FROM_DATA);
3915 if (tp1->whoTo->rto_needed == 0) {
3916 tp1->whoTo->rto_needed = 1;
3922 * CMT: CUCv2 algorithm. From the
3923 * cumack'd TSNs, for each TSN being
3924 * acked for the first time, set the
3925 * following variables for the
3926 * corresp destination.
3927 * new_pseudo_cumack will trigger a
3929 * find_(rtx_)pseudo_cumack will
3930 * trigger search for the next
3931 * expected (rtx-)pseudo-cumack.
3933 tp1->whoTo->new_pseudo_cumack = 1;
3934 tp1->whoTo->find_pseudo_cumack = 1;
3935 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3938 /* sa_ignore NO_NULL_CHK */
3939 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3942 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3943 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3945 if (tp1->rec.data.chunk_was_revoked) {
3946 /* deflate the cwnd */
3947 tp1->whoTo->cwnd -= tp1->book_size;
3948 tp1->rec.data.chunk_was_revoked = 0;
/* Per-stream bookkeeping: NR-acked chunks were already uncounted. */
3950 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3951 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3952 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
3955 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3959 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3960 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3961 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
3962 asoc->trigger_reset = 1;
/* Chunk is fully acked: unlink, release buffer space, and free it. */
3964 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3966 /* sa_ignore NO_NULL_CHK */
3967 sctp_free_bufspace(stcb, asoc, tp1, 1);
3968 sctp_m_freem(tp1->data);
3971 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3972 sctp_log_sack(asoc->last_acked_seq,
3977 SCTP_LOG_FREE_SENT);
3979 asoc->sent_queue_cnt--;
3980 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Freed send-buffer space: wake a blocked sender if there is a socket.
 * The __APPLE__/SO_LOCK_TESTING path must drop the TCB lock to take the
 * socket lock, so it re-checks that the assoc survived the unlock. */
3987 /* sa_ignore NO_NULL_CHK */
3988 if (stcb->sctp_socket) {
3989 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3993 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3994 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3995 /* sa_ignore NO_NULL_CHK */
3996 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3998 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3999 so = SCTP_INP_SO(stcb->sctp_ep);
4000 atomic_add_int(&stcb->asoc.refcnt, 1);
4001 SCTP_TCB_UNLOCK(stcb);
4002 SCTP_SOCKET_LOCK(so, 1);
4003 SCTP_TCB_LOCK(stcb);
4004 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4005 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4006 /* assoc was freed while we were unlocked */
4007 SCTP_SOCKET_UNLOCK(so, 1);
4011 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4012 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4013 SCTP_SOCKET_UNLOCK(so, 1);
4016 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4017 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4021 /* JRS - Use the congestion control given in the CC module */
4022 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4023 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4024 if (net->net_ack2 > 0) {
4026 * Karn's rule applies to clearing error
4027 * count, this is optional.
4029 net->error_count = 0;
4030 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4031 /* addr came good */
4032 net->dest_state |= SCTP_ADDR_REACHABLE;
4033 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4034 0, (void *)net, SCTP_SO_NOT_LOCKED);
4036 if (net == stcb->asoc.primary_destination) {
4037 if (stcb->asoc.alternate) {
4038 /* release the alternate,
4039 * primary is good */
4040 sctp_free_remote_addr(stcb->asoc.alternate);
4041 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart heartbeats and let the
 * CC module restore the window. */
4044 if (net->dest_state & SCTP_ADDR_PF) {
4045 net->dest_state &= ~SCTP_ADDR_PF;
4046 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4047 stcb->sctp_ep, stcb, net,
4048 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4049 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4050 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4051 /* Done with this net */
4054 /* restore any doubled timers */
4055 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4056 if (net->RTO < stcb->asoc.minrto) {
4057 net->RTO = stcb->asoc.minrto;
4059 if (net->RTO > stcb->asoc.maxrto) {
4060 net->RTO = stcb->asoc.maxrto;
4064 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4066 asoc->last_acked_seq = cumack;
4068 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4069 /* nothing left in-flight */
4070 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4071 net->flight_size = 0;
4072 net->partial_bytes_acked = 0;
4074 asoc->total_flight = 0;
4075 asoc->total_flight_count = 0;
/* Recompute peer rwnd from the advertised window minus what is still
 * in flight (plus per-chunk overhead). */
4078 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4079 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4080 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4081 /* SWS sender side engages */
4082 asoc->peers_rwnd = 0;
4084 if (asoc->peers_rwnd > old_rwnd) {
4085 win_probe_recovery = 1;
4087 /* Now assure a timer where data is queued at */
4090 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4093 if (win_probe_recovery && (net->window_probe)) {
4094 win_probe_recovered = 1;
4096 * Find first chunk that was used with window probe
4097 * and clear the sent
4099 /* sa_ignore FREED_MEMORY */
4100 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4101 if (tp1->window_probe) {
4102 /* move back to data send queue */
4103 sctp_window_probe_recovery(stcb, asoc, tp1);
4108 if (net->RTO == 0) {
4109 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4111 to_ticks = MSEC_TO_TICKS(net->RTO);
4113 if (net->flight_size) {
4115 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4116 sctp_timeout_handler, &net->rxt_timer);
4117 if (net->window_probe) {
4118 net->window_probe = 0;
4121 if (net->window_probe) {
4122 /* In window probes we must assure a timer
4123 * is still running there */
4124 net->window_probe = 0;
4125 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4126 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4127 sctp_timeout_handler, &net->rxt_timer);
4129 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4130 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4132 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight accounting drifted (only possible if everything left is
 * PR-SCTP skip-marked): audit and, if confirmed, rebuild the counters
 * from the sent_queue. */
4137 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4138 (asoc->sent_queue_retran_cnt == 0) &&
4139 (win_probe_recovered == 0) &&
4142 * huh, this should not happen unless all packets are
4143 * PR-SCTP and marked to skip of course.
4145 if (sctp_fs_audit(asoc)) {
4146 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4147 net->flight_size = 0;
4149 asoc->total_flight = 0;
4150 asoc->total_flight_count = 0;
4151 asoc->sent_queue_retran_cnt = 0;
4152 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4153 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4154 sctp_flight_size_increase(tp1);
4155 sctp_total_flight_increase(stcb, tp1);
4156 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4157 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4164 /**********************************/
4165 /* Now what about shutdown issues */
4166 /**********************************/
4167 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4168 /* nothing left on sendqueue.. consider done */
4170 if ((asoc->stream_queue_cnt == 1) &&
4171 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4172 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4173 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4174 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4176 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4177 (asoc->stream_queue_cnt == 0)) {
4178 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4179 /* Need to abort here */
4180 struct mbuf *op_err;
4185 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4186 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4187 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4190 struct sctp_nets *netp;
4192 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4193 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4194 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4196 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4197 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4198 sctp_stop_timers_for_shutdown(stcb);
4199 if (asoc->alternate) {
4200 netp = asoc->alternate;
4202 netp = asoc->primary_destination;
4204 sctp_send_shutdown(stcb, netp);
4205 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4206 stcb->sctp_ep, stcb, netp);
4207 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4208 stcb->sctp_ep, stcb, netp);
4210 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4211 (asoc->stream_queue_cnt == 0)) {
4212 struct sctp_nets *netp;
4214 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4217 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4218 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4219 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4220 sctp_stop_timers_for_shutdown(stcb);
4221 if (asoc->alternate) {
4222 netp = asoc->alternate;
4224 netp = asoc->primary_destination;
4226 sctp_send_shutdown_ack(stcb, netp);
4227 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4228 stcb->sctp_ep, stcb, netp);
4231 /*********************************************/
4232 /* Here we perform PR-SCTP procedures */
4234 /*********************************************/
4235 /* C1. update advancedPeerAckPoint */
4236 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4237 asoc->advanced_peer_ack_point = cumack;
4239 /* PR-Sctp issues need to be addressed too */
4240 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4241 struct sctp_tmit_chunk *lchk;
4242 uint32_t old_adv_peer_ack_point;
4244 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4245 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4246 /* C3. See if we need to send a Fwd-TSN */
4247 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4249 * ISSUE with ECN, see FWD-TSN processing.
4251 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4252 send_forward_tsn(stcb, asoc);
4254 /* try to FR fwd-tsn's that get lost too */
4255 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4256 send_forward_tsn(stcb, asoc);
4261 /* Assure a timer is up */
4262 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4263 stcb->sctp_ep, stcb, lchk->whoTo);
4266 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4267 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4269 stcb->asoc.peers_rwnd,
4270 stcb->asoc.total_flight,
4271 stcb->asoc.total_output_queue_size);
4276 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4277 struct sctp_tcb *stcb,
4278 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4279 int *abort_now, uint8_t flags,
4280 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4282 struct sctp_association *asoc;
4283 struct sctp_tmit_chunk *tp1, *tp2;
4284 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4285 uint16_t wake_him = 0;
4286 uint32_t send_s = 0;
4288 int accum_moved = 0;
4289 int will_exit_fast_recovery = 0;
4290 uint32_t a_rwnd, old_rwnd;
4291 int win_probe_recovery = 0;
4292 int win_probe_recovered = 0;
4293 struct sctp_nets *net = NULL;
4296 uint8_t reneged_all = 0;
4297 uint8_t cmt_dac_flag;
4300 * we take any chance we can to service our queues since we cannot
4301 * get awoken when the socket is read from :<
4304 * Now perform the actual SACK handling: 1) Verify that it is not an
4305 * old sack, if so discard. 2) If there is nothing left in the send
4306 * queue (cum-ack is equal to last acked) then you have a duplicate
4307 * too, update any rwnd change and verify no timers are running.
4308 * then return. 3) Process any new consequtive data i.e. cum-ack
4309 * moved process these first and note that it moved. 4) Process any
4310 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4311 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4312 * sync up flightsizes and things, stop all timers and also check
4313 * for shutdown_pending state. If so then go ahead and send off the
4314 * shutdown. If in shutdown recv, send off the shutdown-ack and
4315 * start that timer, Ret. 9) Strike any non-acked things and do FR
4316 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4317 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4318 * if in shutdown_recv state.
4320 SCTP_TCB_LOCK_ASSERT(stcb);
4322 this_sack_lowest_newack = 0;
4323 SCTP_STAT_INCR(sctps_slowpath_sack);
4325 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4326 #ifdef SCTP_ASOCLOG_OF_TSNS
4327 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4328 stcb->asoc.cumack_log_at++;
4329 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4330 stcb->asoc.cumack_log_at = 0;
4335 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4336 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4337 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4339 old_rwnd = stcb->asoc.peers_rwnd;
4340 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4341 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4342 stcb->asoc.overall_error_count,
4344 SCTP_FROM_SCTP_INDATA,
4347 stcb->asoc.overall_error_count = 0;
4349 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4350 sctp_log_sack(asoc->last_acked_seq,
4357 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4359 uint32_t *dupdata, dblock;
4361 for (i = 0; i < num_dup; i++) {
4362 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4363 sizeof(uint32_t), (uint8_t *) & dblock);
4364 if (dupdata == NULL) {
4367 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4371 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4372 tp1 = TAILQ_LAST(&asoc->sent_queue,
4373 sctpchunk_listhead);
4374 send_s = tp1->rec.data.tsn + 1;
4377 send_s = asoc->sending_seq;
4379 if (SCTP_TSN_GE(cum_ack, send_s)) {
4380 struct mbuf *op_err;
4381 char msg[SCTP_DIAG_INFO_LEN];
4384 * no way, we have not even sent this TSN out yet. Peer is
4385 * hopelessly messed up with us.
4387 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4390 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4391 tp1->rec.data.tsn, (void *)tp1);
4396 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4398 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4399 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4400 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4403 /**********************/
4404 /* 1) check the range */
4405 /**********************/
4406 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4407 /* acking something behind */
4410 /* update the Rwnd of the peer */
4411 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4412 TAILQ_EMPTY(&asoc->send_queue) &&
4413 (asoc->stream_queue_cnt == 0)) {
4414 /* nothing left on send/sent and strmq */
4415 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4416 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4417 asoc->peers_rwnd, 0, 0, a_rwnd);
4419 asoc->peers_rwnd = a_rwnd;
4420 if (asoc->sent_queue_retran_cnt) {
4421 asoc->sent_queue_retran_cnt = 0;
4423 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4424 /* SWS sender side engages */
4425 asoc->peers_rwnd = 0;
4427 /* stop any timers */
4428 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4429 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4430 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4431 net->partial_bytes_acked = 0;
4432 net->flight_size = 0;
4434 asoc->total_flight = 0;
4435 asoc->total_flight_count = 0;
4439 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4440 * things. The total byte count acked is tracked in netAckSz AND
4441 * netAck2 is used to track the total bytes acked that are un-
4442 * amibguious and were never retransmitted. We track these on a per
4443 * destination address basis.
4445 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4446 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4447 /* Drag along the window_tsn for cwr's */
4448 net->cwr_window_tsn = cum_ack;
4450 net->prev_cwnd = net->cwnd;
4455 * CMT: Reset CUC and Fast recovery algo variables before
4458 net->new_pseudo_cumack = 0;
4459 net->will_exit_fast_recovery = 0;
4460 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4461 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4464 /* process the new consecutive TSN first */
4465 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4466 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4467 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4469 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4471 * If it is less than ACKED, it is
4472 * now no-longer in flight. Higher
4473 * values may occur during marking
4475 if ((tp1->whoTo->dest_state &
4476 SCTP_ADDR_UNCONFIRMED) &&
4477 (tp1->snd_count < 2)) {
4479 * If there was no retran
4480 * and the address is
4481 * un-confirmed and we sent
4483 * sacked.. its confirmed,
4486 tp1->whoTo->dest_state &=
4487 ~SCTP_ADDR_UNCONFIRMED;
4489 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4490 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4491 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4492 tp1->whoTo->flight_size,
4494 (uint32_t) (uintptr_t) tp1->whoTo,
4497 sctp_flight_size_decrease(tp1);
4498 sctp_total_flight_decrease(stcb, tp1);
4499 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4500 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4504 tp1->whoTo->net_ack += tp1->send_size;
4506 /* CMT SFR and DAC algos */
4507 this_sack_lowest_newack = tp1->rec.data.tsn;
4508 tp1->whoTo->saw_newack = 1;
4510 if (tp1->snd_count < 2) {
4512 * True non-retransmited
4515 tp1->whoTo->net_ack2 +=
4518 /* update RTO too? */
4522 sctp_calculate_rto(stcb,
4524 &tp1->sent_rcv_time,
4525 sctp_align_safe_nocopy,
4526 SCTP_RTT_FROM_DATA);
4529 if (tp1->whoTo->rto_needed == 0) {
4530 tp1->whoTo->rto_needed = 1;
4536 * CMT: CUCv2 algorithm. From the
4537 * cumack'd TSNs, for each TSN being
4538 * acked for the first time, set the
4539 * following variables for the
4540 * corresp destination.
4541 * new_pseudo_cumack will trigger a
4543 * find_(rtx_)pseudo_cumack will
4544 * trigger search for the next
4545 * expected (rtx-)pseudo-cumack.
4547 tp1->whoTo->new_pseudo_cumack = 1;
4548 tp1->whoTo->find_pseudo_cumack = 1;
4549 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4553 sctp_log_sack(asoc->last_acked_seq,
4558 SCTP_LOG_TSN_ACKED);
4560 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4561 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4564 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4565 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4566 #ifdef SCTP_AUDITING_ENABLED
4567 sctp_audit_log(0xB3,
4568 (asoc->sent_queue_retran_cnt & 0x000000ff));
4571 if (tp1->rec.data.chunk_was_revoked) {
4572 /* deflate the cwnd */
4573 tp1->whoTo->cwnd -= tp1->book_size;
4574 tp1->rec.data.chunk_was_revoked = 0;
4576 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4577 tp1->sent = SCTP_DATAGRAM_ACKED;
4584 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4585 /* always set this up to cum-ack */
4586 asoc->this_sack_highest_gap = last_tsn;
4588 if ((num_seg > 0) || (num_nr_seg > 0)) {
4591 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4592 * to be greater than the cumack. Also reset saw_newack to 0
4595 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4596 net->saw_newack = 0;
4597 net->this_sack_highest_newack = last_tsn;
4601 * thisSackHighestGap will increase while handling NEW
4602 * segments this_sack_highest_newack will increase while
4603 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4604 * used for CMT DAC algo. saw_newack will also change.
4606 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4607 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4608 num_seg, num_nr_seg, &rto_ok)) {
4612 * validate the biggest_tsn_acked in the gap acks if strict
4613 * adherence is wanted.
4615 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4617 * peer is either confused or we are under attack.
4620 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4621 biggest_tsn_acked, send_s);
4625 /*******************************************/
4626 /* cancel ALL T3-send timer if accum moved */
4627 /*******************************************/
4628 if (asoc->sctp_cmt_on_off > 0) {
4629 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4630 if (net->new_pseudo_cumack)
4631 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4633 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4638 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4639 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4640 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4644 /********************************************/
4645 /* drop the acked chunks from the sentqueue */
4646 /********************************************/
4647 asoc->last_acked_seq = cum_ack;
4649 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4650 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4653 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4654 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4655 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4658 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4662 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4663 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4664 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4665 asoc->trigger_reset = 1;
4667 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4668 if (PR_SCTP_ENABLED(tp1->flags)) {
4669 if (asoc->pr_sctp_cnt != 0)
4670 asoc->pr_sctp_cnt--;
4672 asoc->sent_queue_cnt--;
4674 /* sa_ignore NO_NULL_CHK */
4675 sctp_free_bufspace(stcb, asoc, tp1, 1);
4676 sctp_m_freem(tp1->data);
4678 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4679 asoc->sent_queue_cnt_removeable--;
4682 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4683 sctp_log_sack(asoc->last_acked_seq,
4688 SCTP_LOG_FREE_SENT);
4690 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4693 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4695 panic("Warning flight size is positive and should be 0");
4697 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4698 asoc->total_flight);
4700 asoc->total_flight = 0;
4702 /* sa_ignore NO_NULL_CHK */
4703 if ((wake_him) && (stcb->sctp_socket)) {
4704 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4708 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4709 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4710 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4712 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4713 so = SCTP_INP_SO(stcb->sctp_ep);
4714 atomic_add_int(&stcb->asoc.refcnt, 1);
4715 SCTP_TCB_UNLOCK(stcb);
4716 SCTP_SOCKET_LOCK(so, 1);
4717 SCTP_TCB_LOCK(stcb);
4718 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4719 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4720 /* assoc was freed while we were unlocked */
4721 SCTP_SOCKET_UNLOCK(so, 1);
4725 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4726 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4727 SCTP_SOCKET_UNLOCK(so, 1);
4730 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4731 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4735 if (asoc->fast_retran_loss_recovery && accum_moved) {
4736 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4737 /* Setup so we will exit RFC2582 fast recovery */
4738 will_exit_fast_recovery = 1;
4742 * Check for revoked fragments:
4744 * if Previous sack - Had no frags then we can't have any revoked if
4745 * Previous sack - Had frag's then - If we now have frags aka
4746 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4747 * some of them. else - The peer revoked all ACKED fragments, since
4748 * we had some before and now we have NONE.
4752 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4753 asoc->saw_sack_with_frags = 1;
4754 } else if (asoc->saw_sack_with_frags) {
4755 int cnt_revoked = 0;
4757 /* Peer revoked all dg's marked or acked */
4758 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4759 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4760 tp1->sent = SCTP_DATAGRAM_SENT;
4761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4762 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4763 tp1->whoTo->flight_size,
4765 (uint32_t) (uintptr_t) tp1->whoTo,
4768 sctp_flight_size_increase(tp1);
4769 sctp_total_flight_increase(stcb, tp1);
4770 tp1->rec.data.chunk_was_revoked = 1;
4772 * To ensure that this increase in
4773 * flightsize, which is artificial, does not
4774 * throttle the sender, we also increase the
4775 * cwnd artificially.
4777 tp1->whoTo->cwnd += tp1->book_size;
4784 asoc->saw_sack_with_frags = 0;
4787 asoc->saw_sack_with_nr_frags = 1;
4789 asoc->saw_sack_with_nr_frags = 0;
4791 /* JRS - Use the congestion control given in the CC module */
4792 if (ecne_seen == 0) {
4793 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4794 if (net->net_ack2 > 0) {
4796 * Karn's rule applies to clearing error
4797 * count, this is optional.
4799 net->error_count = 0;
4800 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4801 /* addr came good */
4802 net->dest_state |= SCTP_ADDR_REACHABLE;
4803 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4804 0, (void *)net, SCTP_SO_NOT_LOCKED);
4806 if (net == stcb->asoc.primary_destination) {
4807 if (stcb->asoc.alternate) {
4808 /* release the alternate,
4809 * primary is good */
4810 sctp_free_remote_addr(stcb->asoc.alternate);
4811 stcb->asoc.alternate = NULL;
4814 if (net->dest_state & SCTP_ADDR_PF) {
4815 net->dest_state &= ~SCTP_ADDR_PF;
4816 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4817 stcb->sctp_ep, stcb, net,
4818 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4819 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4820 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4821 /* Done with this net */
4824 /* restore any doubled timers */
4825 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4826 if (net->RTO < stcb->asoc.minrto) {
4827 net->RTO = stcb->asoc.minrto;
4829 if (net->RTO > stcb->asoc.maxrto) {
4830 net->RTO = stcb->asoc.maxrto;
4834 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4836 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4837 /* nothing left in-flight */
4838 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4839 /* stop all timers */
4840 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4842 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4843 net->flight_size = 0;
4844 net->partial_bytes_acked = 0;
4846 asoc->total_flight = 0;
4847 asoc->total_flight_count = 0;
4849 /**********************************/
4850 /* Now what about shutdown issues */
4851 /**********************************/
4852 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4853 /* nothing left on sendqueue.. consider done */
4854 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4855 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4856 asoc->peers_rwnd, 0, 0, a_rwnd);
4858 asoc->peers_rwnd = a_rwnd;
4859 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4860 /* SWS sender side engages */
4861 asoc->peers_rwnd = 0;
4864 if ((asoc->stream_queue_cnt == 1) &&
4865 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4866 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4867 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4868 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4870 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4871 (asoc->stream_queue_cnt == 0)) {
4872 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4873 /* Need to abort here */
4874 struct mbuf *op_err;
4879 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4880 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4881 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4884 struct sctp_nets *netp;
4886 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4887 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4888 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4890 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4891 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4892 sctp_stop_timers_for_shutdown(stcb);
4893 if (asoc->alternate) {
4894 netp = asoc->alternate;
4896 netp = asoc->primary_destination;
4898 sctp_send_shutdown(stcb, netp);
4899 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4900 stcb->sctp_ep, stcb, netp);
4901 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4902 stcb->sctp_ep, stcb, netp);
4905 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4906 (asoc->stream_queue_cnt == 0)) {
4907 struct sctp_nets *netp;
4909 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4912 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4913 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4914 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4915 sctp_stop_timers_for_shutdown(stcb);
4916 if (asoc->alternate) {
4917 netp = asoc->alternate;
4919 netp = asoc->primary_destination;
4921 sctp_send_shutdown_ack(stcb, netp);
4922 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4923 stcb->sctp_ep, stcb, netp);
4928 * Now here we are going to recycle net_ack for a different use...
4931 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4936 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4937 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4938 * automatically ensure that.
4940 if ((asoc->sctp_cmt_on_off > 0) &&
4941 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4942 (cmt_dac_flag == 0)) {
4943 this_sack_lowest_newack = cum_ack;
4945 if ((num_seg > 0) || (num_nr_seg > 0)) {
4946 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4947 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4949 /* JRS - Use the congestion control given in the CC module */
4950 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4952 /* Now are we exiting loss recovery ? */
4953 if (will_exit_fast_recovery) {
4954 /* Ok, we must exit fast recovery */
4955 asoc->fast_retran_loss_recovery = 0;
4957 if ((asoc->sat_t3_loss_recovery) &&
4958 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4959 /* end satellite t3 loss recovery */
4960 asoc->sat_t3_loss_recovery = 0;
4965 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4966 if (net->will_exit_fast_recovery) {
4967 /* Ok, we must exit fast recovery */
4968 net->fast_retran_loss_recovery = 0;
4972 /* Adjust and set the new rwnd value */
4973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4974 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4975 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4977 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4978 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4979 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4980 /* SWS sender side engages */
4981 asoc->peers_rwnd = 0;
4983 if (asoc->peers_rwnd > old_rwnd) {
4984 win_probe_recovery = 1;
4987 * Now we must setup so we have a timer up for anyone with
4993 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4994 if (win_probe_recovery && (net->window_probe)) {
4995 win_probe_recovered = 1;
4997 * Find first chunk that was used with
4998 * window probe and clear the event. Put
4999 * it back into the send queue as if has
5002 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5003 if (tp1->window_probe) {
5004 sctp_window_probe_recovery(stcb, asoc, tp1);
5009 if (net->flight_size) {
5011 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5012 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5013 stcb->sctp_ep, stcb, net);
5015 if (net->window_probe) {
5016 net->window_probe = 0;
5019 if (net->window_probe) {
5020 /* In window probes we must assure a timer
5021 * is still running there */
5022 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5023 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5024 stcb->sctp_ep, stcb, net);
5027 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5028 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5030 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5035 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5036 (asoc->sent_queue_retran_cnt == 0) &&
5037 (win_probe_recovered == 0) &&
5040 * huh, this should not happen unless all packets are
5041 * PR-SCTP and marked to skip of course.
5043 if (sctp_fs_audit(asoc)) {
5044 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5045 net->flight_size = 0;
5047 asoc->total_flight = 0;
5048 asoc->total_flight_count = 0;
5049 asoc->sent_queue_retran_cnt = 0;
5050 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5051 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5052 sctp_flight_size_increase(tp1);
5053 sctp_total_flight_increase(stcb, tp1);
5054 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5055 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5062 /*********************************************/
5063 /* Here we perform PR-SCTP procedures */
5065 /*********************************************/
5066 /* C1. update advancedPeerAckPoint */
5067 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5068 asoc->advanced_peer_ack_point = cum_ack;
5070 /* C2. try to further move advancedPeerAckPoint ahead */
5071 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5072 struct sctp_tmit_chunk *lchk;
5073 uint32_t old_adv_peer_ack_point;
5075 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5076 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5077 /* C3. See if we need to send a Fwd-TSN */
5078 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5080 * ISSUE with ECN, see FWD-TSN processing.
5082 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5083 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5084 0xee, cum_ack, asoc->advanced_peer_ack_point,
5085 old_adv_peer_ack_point);
5087 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5088 send_forward_tsn(stcb, asoc);
5090 /* try to FR fwd-tsn's that get lost too */
5091 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5092 send_forward_tsn(stcb, asoc);
5097 /* Assure a timer is up */
5098 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5099 stcb->sctp_ep, stcb, lchk->whoTo);
5102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5103 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5105 stcb->asoc.peers_rwnd,
5106 stcb->asoc.total_flight,
5107 stcb->asoc.total_output_queue_size);
/*
 * Process the implicit cumulative-TSN ack carried in a received SHUTDOWN
 * chunk.  A SHUTDOWN acks data like a SACK with no gap-ack blocks, so we
 * synthesize an a_rwnd that leaves the peer's advertised window unchanged
 * and hand everything to the express SACK path.
 *
 * NOTE(review): this extraction is missing structural lines (return type,
 * braces); code below is left byte-identical to the source text.
 */
5112 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5115 uint32_t cum_ack, a_rwnd;
/* Cumulative TSN ack arrives in network byte order inside the SHUTDOWN. */
5117 cum_ack = ntohl(cp->cumulative_tsn_ack);
5118 /* Arrange so a_rwnd does NOT change */
/*
 * peers_rwnd + total_flight reconstructs the window the peer advertised:
 * when the SACK code later subtracts the outstanding flight, the stored
 * peers_rwnd comes out unchanged.
 */
5119 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5121 /* Now call the express sack handling */
/* Final arg 0: not an ECNE-seen path; abort_flag is set on fatal errors. */
5122 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * After a FORWARD-TSN has advanced the delivery point for one inbound
 * stream, push out everything on that stream's reorder queue that has
 * become deliverable.  Two passes:
 *   1) deliver every queued message at or before the new last-delivered
 *      MID (the peer abandoned anything we are missing below that point);
 *   2) resume normal in-order delivery from last_mid_delivered + 1.
 * Fragmented messages are not delivered inline; instead we back up
 * last_mid_delivered and let sctp_deliver_reasm_check() handle them.
 *
 * Caller holds the TCB lock and the INP read lock (SCTP_READ_LOCK_HELD
 * is passed to the readq/reasm helpers below).
 *
 * NOTE(review): interior lines (braces, some declarations) are missing
 * from this extraction; code lines are left byte-identical.
 */
5126 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5127 struct sctp_stream_in *strmin)
5129 struct sctp_queued_to_read *ctl, *nctl;
5130 struct sctp_association *asoc;
5132 int need_reasm_check = 0;
/* Start from the stream's current delivery point (MID = message id). */
5135 mid = strmin->last_mid_delivered;
5137 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with MID <= mid is now deliverable (peer skipped it). */
5140 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5141 if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
5142 /* this is deliverable now */
5143 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Complete (unfragmented) message: unlink from whichever stream queue holds it. */
5144 if (ctl->on_strm_q) {
5145 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5146 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5147 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5148 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
/* Unknown on_strm_q value means queue accounting is corrupt. */
5151 panic("strmin: %p ctl: %p unknown %d",
5152 strmin, ctl, ctl->on_strm_q);
5157 /* subtract pending on streams */
5158 asoc->size_on_all_streams -= ctl->length;
5159 sctp_ucount_decr(asoc->cnt_on_all_streams);
5160 /* deliver it to at least the delivery-q */
5161 if (stcb->sctp_socket) {
/* TSN can no longer be revoked once handed to the socket's read queue. */
5162 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5163 sctp_add_to_readq(stcb->sctp_ep, stcb,
5165 &stcb->sctp_socket->so_rcv,
5166 1, SCTP_READ_LOCK_HELD,
5167 SCTP_SO_NOT_LOCKED);
5170 /* Its a fragmented message */
5171 if (ctl->first_frag_seen) {
5172 /* Make it so this is next to
5173 * deliver, we restore later */
/* Rewind delivery point so the reasm check below picks this message up. */
5174 strmin->last_mid_delivered = ctl->mid - 1;
5175 need_reasm_check = 1;
5180 /* no more delivery now. */
5184 if (need_reasm_check) {
/* Drain whatever the reassembly code can now deliver for this stream. */
5187 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5188 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5189 /* Restore the next to deliver unless we are ahead */
5190 strmin->last_mid_delivered = mid;
5193 /* Left the front Partial one on */
5196 need_reasm_check = 0;
5199 * now we must deliver things in queue the normal way if any are
/* Pass 2: resume strict in-order delivery at the next expected MID. */
5202 mid = strmin->last_mid_delivered + 1;
5203 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5204 if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
5205 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5206 /* this is deliverable now */
5207 if (ctl->on_strm_q) {
5208 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5209 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5210 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5211 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5214 panic("strmin: %p ctl: %p unknown %d",
5215 strmin, ctl, ctl->on_strm_q);
5220 /* subtract pending on streams */
5221 asoc->size_on_all_streams -= ctl->length;
5222 sctp_ucount_decr(asoc->cnt_on_all_streams);
5223 /* deliver it to at least the delivery-q */
/* Advance the delivery point past the message we just handed up. */
5224 strmin->last_mid_delivered = ctl->mid;
5225 if (stcb->sctp_socket) {
5226 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5227 sctp_add_to_readq(stcb->sctp_ep, stcb,
5229 &stcb->sctp_socket->so_rcv, 1,
5230 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Next expected MID after this delivery. */
5233 mid = strmin->last_mid_delivered + 1;
5235 /* Its a fragmented message */
5236 if (ctl->first_frag_seen) {
5237 /* Make it so this is next to
5239 strmin->last_mid_delivered = ctl->mid - 1;
5240 need_reasm_check = 1;
5248 if (need_reasm_check) {
/* Result intentionally discarded: nothing left to restore after pass 2. */
5249 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * Flush the partial-reassembly state for one (stream, MID) that the peer
 * has abandoned via FORWARD-TSN.  Drops queued fragments, and either
 * resets the control to continue with surviving newer (unordered,
 * post-cumtsn) fragments or frees the control entirely.
 *
 * stream/mid identify the abandoned message; ordered selects the
 * ordered vs. unordered queue; cumtsn is the new cumulative TSN — for
 * old-style (non I-DATA) unordered data, fragments above cumtsn are
 * kept, not purged.
 *
 * NOTE(review): interior structural lines are missing from this
 * extraction; code lines are left byte-identical.
 */
5256 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5257 struct sctp_association *asoc,
5258 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5260 struct sctp_queued_to_read *control;
5261 struct sctp_stream_in *strm;
5262 struct sctp_tmit_chunk *chk, *nchk;
5263 int cnt_removed = 0;
5266 * For now large messages held on the stream reasm that are complete
5267 * will be tossed too. We could in theory do more work to spin
5268 * through and stop after dumping one msg aka seeing the start of a
5269 * new msg at the head, and call the delivery function... to see if
5270 * it can be delivered... But for now we just dump everything on the
/* Locate the in-progress reassembly entry for this stream/MID, if any. */
5273 strm = &asoc->strmin[stream];
5274 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5275 if (control == NULL) {
/* Old-style unordered data entirely above the new cum-ack: nothing to flush. */
5279 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5282 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5283 /* Purge hanging chunks */
5284 if (!asoc->idata_supported && (ordered == 0)) {
/* Keep old-style unordered fragments beyond cumtsn; they are still live. */
5285 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
/* Unlink the abandoned fragment and release its accounting and mbufs. */
5290 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5291 asoc->size_on_reasm_queue -= chk->send_size;
5292 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5294 sctp_m_freem(chk->data);
5297 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5299 if (!TAILQ_EMPTY(&control->reasm)) {
5300 /* This has to be old data, unordered */
5301 if (control->data) {
5302 sctp_m_freem(control->data);
5303 control->data = NULL;
/* Re-arm the control at cumtsn and restart assembly from the survivors. */
5305 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5306 chk = TAILQ_FIRST(&control->reasm);
5307 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5308 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5309 sctp_add_chk_to_control(control, strm, stcb, asoc,
5310 chk, SCTP_READ_LOCK_HELD);
5312 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* No fragments remain: detach the control from its stream queue. */
5315 if (control->on_strm_q == SCTP_ON_ORDERED) {
5316 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5317 control->on_strm_q = 0;
5318 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5319 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5320 control->on_strm_q = 0;
5322 } else if (control->on_strm_q) {
/* Unknown queue tag: accounting is corrupt, fail loudly. */
5323 panic("strm: %p ctl: %p unknown %d",
5324 strm, control, control->on_strm_q);
5327 control->on_strm_q = 0;
/* Only free the control if the socket read queue does not still own it. */
5328 if (control->on_read_q == 0) {
5329 sctp_free_remote_addr(control->whoFrom);
5330 if (control->data) {
5331 sctp_m_freem(control->data);
5332 control->data = NULL;
5334 sctp_free_a_readq(stcb, control);
5339 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5340 struct sctp_forward_tsn_chunk *fwd,
5341 int *abort_flag, struct mbuf *m, int offset)
5343 /* The pr-sctp fwd tsn */
5345 * here we will perform all the data receiver side steps for
5346 * processing FwdTSN, as required in by pr-sctp draft:
5348 * Assume we get FwdTSN(x):
5350 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5351 * + others we have 3) examine and update re-ordering queue on
5352 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5353 * report where we are.
5355 struct sctp_association *asoc;
5356 uint32_t new_cum_tsn, gap;
5357 unsigned int i, fwd_sz, m_size;
5359 struct sctp_stream_in *strm;
5360 struct sctp_queued_to_read *ctl, *sv;
5363 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5364 SCTPDBG(SCTP_DEBUG_INDATA1,
5365 "Bad size too small/big fwd-tsn\n");
5368 m_size = (stcb->asoc.mapping_array_size << 3);
5369 /*************************************************************/
5370 /* 1. Here we update local cumTSN and shift the bitmap array */
5371 /*************************************************************/
5372 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5374 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5375 /* Already got there ... */
5379 * now we know the new TSN is more advanced, let's find the actual
5382 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5383 asoc->cumulative_tsn = new_cum_tsn;
5384 if (gap >= m_size) {
5385 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5386 struct mbuf *op_err;
5387 char msg[SCTP_DIAG_INFO_LEN];
5390 * out of range (of single byte chunks in the rwnd I
5391 * give out). This must be an attacker.
5394 snprintf(msg, sizeof(msg),
5395 "New cum ack %8.8x too high, highest TSN %8.8x",
5396 new_cum_tsn, asoc->highest_tsn_inside_map);
5397 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5398 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5399 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5402 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5404 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5405 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5406 asoc->highest_tsn_inside_map = new_cum_tsn;
5408 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5409 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5411 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5412 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5415 SCTP_TCB_LOCK_ASSERT(stcb);
5416 for (i = 0; i <= gap; i++) {
5417 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5418 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5419 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5420 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5421 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5426 /*************************************************************/
5427 /* 2. Clear up re-assembly queue */
5428 /*************************************************************/
5430 /* This is now done as part of clearing up the stream/seq */
5431 if (asoc->idata_supported == 0) {
5434 /* Flush all the un-ordered data based on cum-tsn */
5435 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5436 for (sid = 0; sid < asoc->streamincnt; sid++) {
5437 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5439 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5441 /*******************************************************/
5442 /* 3. Update the PR-stream re-ordering queues and fix */
5443 /* delivery issues as needed. */
5444 /*******************************************************/
5445 fwd_sz -= sizeof(*fwd);
5448 unsigned int num_str;
5449 uint32_t mid, cur_mid;
5451 uint16_t ordered, flags;
5452 struct sctp_strseq *stseq, strseqbuf;
5453 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5455 offset += sizeof(*fwd);
5457 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5458 if (asoc->idata_supported) {
5459 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5461 num_str = fwd_sz / sizeof(struct sctp_strseq);
5463 for (i = 0; i < num_str; i++) {
5464 if (asoc->idata_supported) {
5465 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5466 sizeof(struct sctp_strseq_mid),
5467 (uint8_t *) & strseqbuf_m);
5468 offset += sizeof(struct sctp_strseq_mid);
5469 if (stseq_m == NULL) {
5472 sid = ntohs(stseq_m->sid);
5473 mid = ntohl(stseq_m->mid);
5474 flags = ntohs(stseq_m->flags);
5475 if (flags & PR_SCTP_UNORDERED_FLAG) {
5481 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5482 sizeof(struct sctp_strseq),
5483 (uint8_t *) & strseqbuf);
5484 offset += sizeof(struct sctp_strseq);
5485 if (stseq == NULL) {
5488 sid = ntohs(stseq->sid);
5489 mid = (uint32_t) ntohs(stseq->ssn);
5497 * Ok we now look for the stream/seq on the read
5498 * queue where its not all delivered. If we find it
5499 * we transmute the read entry into a PDI_ABORTED.
5501 if (sid >= asoc->streamincnt) {
5502 /* screwed up streams, stop! */
5505 if ((asoc->str_of_pdapi == sid) &&
5506 (asoc->ssn_of_pdapi == mid)) {
5508 * If this is the one we were partially
5509 * delivering now then we no longer are.
5510 * Note this will change with the reassembly
5513 asoc->fragmented_delivery_inprogress = 0;
5515 strm = &asoc->strmin[sid];
5516 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5517 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5519 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5520 if ((ctl->sinfo_stream == sid) &&
5521 (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
5522 str_seq = (sid << 16) | (0x0000ffff & mid);
5523 ctl->pdapi_aborted = 1;
5524 sv = stcb->asoc.control_pdapi;
5526 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5527 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5528 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5529 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5531 } else if (ctl->on_strm_q) {
5532 panic("strm: %p ctl: %p unknown %d",
5533 strm, ctl, ctl->on_strm_q);
5537 stcb->asoc.control_pdapi = ctl;
5538 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5540 SCTP_PARTIAL_DELIVERY_ABORTED,
5542 SCTP_SO_NOT_LOCKED);
5543 stcb->asoc.control_pdapi = sv;
5545 } else if ((ctl->sinfo_stream == sid) &&
5546 SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
5547 /* We are past our victim SSN */
5551 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5552 /* Update the sequence number */
5553 strm->last_mid_delivered = mid;
5555 /* now kick the stream the new way */
5556 /* sa_ignore NO_NULL_CHK */
5557 sctp_kick_prsctp_reorder_queue(stcb, strm);
5559 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5562 * Now slide thing forward.
5564 sctp_slide_mapping_arrays(stcb);