2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
54 * NOTES: On the outbound side of things I need to check the sack timer to
55 * see if I should generate a sack into the chunk queue (if I have data to
56 * send that is and will be sending it .. for bundling.
58 * The callback in sctp_usrreq.c will get called when the socket is read from.
59 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: merge a reassembly chunk's data onto a read-queue
 * control entry (definition appears at the bottom of this file).
 * NOTE(review): extraction elided the return type line above this prototype.
 */
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64 struct sctp_stream_in *strm,
65 struct sctp_tcb *stcb,
66 struct sctp_association *asoc,
67 struct sctp_tmit_chunk *chk, int lock_held);
/*
 * Recompute the association's advertised receive window and cache it in
 * asoc->my_rwnd.  Pure delegation to sctp_calc_rwnd().
 * NOTE(review): return type and braces elided by extraction.
 */
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
/*
 * Compute the receive window to advertise for this association:
 * start from socket receive-buffer space, subtract data still held on the
 * reassembly and per-stream queues (plus MSIZE per-chunk mbuf overhead)
 * and the rwnd control overhead.  Several lines (return type, returns,
 * closing braces) are elided in this view.
 */
76 /* Calculate what the rwnd would be */
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
83 * This is really set wrong with respect to a 1-2-m socket. Since
84 * the sb_cc is the count that everyone as put up. When we re-write
85 * sctp_soreceive then we will fix this so that ONLY this
86 * associations data is taken into account.
/* No socket: presumably bail out early — body elided here. */
88 if (stcb->sctp_socket == NULL) {
/* Nothing buffered anywhere: grant the full window. */
91 if (stcb->asoc.sb_cc == 0 &&
92 asoc->size_on_reasm_queue == 0 &&
93 asoc->size_on_all_streams == 0) {
94 /* Full rwnd granted */
95 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 /* get actual space */
99 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
101 * take out what has NOT been put on socket queue and we yet hold
/* Charge both byte counts and per-chunk mbuf overhead (cnt * MSIZE). */
104 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 asoc->cnt_on_reasm_queue * MSIZE));
106 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 asoc->cnt_on_all_streams * MSIZE));
112 /* what is the overhead of all these rwnd's */
113 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
115 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 * even it is 0. SWS engaged
/* Silly-window avoidance: clamp tiny windows — handling elided below. */
118 if (calc < stcb->asoc.my_rwnd_control_len) {
127 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a sctp_queued_to_read entry describing one
 * incoming user message (TSN/PPID/SID/MID, flags, source net, data mbuf).
 * Returns NULL when allocation fails; otherwise the caller owns the entry.
 * Takes a reference on 'net' (ref_count) released when the entry is freed.
 */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131 struct sctp_nets *net,
132 uint32_t tsn, uint32_t ppid,
133 uint32_t context, uint16_t sid,
134 uint32_t mid, uint8_t flags,
137 struct sctp_queued_to_read *read_queue_e = NULL;
139 sctp_alloc_a_readq(stcb, read_queue_e);
140 if (read_queue_e == NULL) {
143 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 read_queue_e->sinfo_stream = sid;
/* Chunk flags are carried in the upper byte of sinfo_flags. */
145 read_queue_e->sinfo_flags = (flags << 8);
146 read_queue_e->sinfo_ppid = ppid;
147 read_queue_e->sinfo_context = context;
148 read_queue_e->sinfo_tsn = tsn;
149 read_queue_e->sinfo_cumtsn = tsn;
150 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
151 read_queue_e->mid = mid;
/* 0xffffffff sentinels: no fragment has been included yet. */
152 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 TAILQ_INIT(&read_queue_e->reasm);
154 read_queue_e->whoFrom = net;
155 atomic_add_int(&net->ref_count, 1);
156 read_queue_e->data = dm;
157 read_queue_e->stcb = stcb;
158 read_queue_e->port_from = stcb->rport;
160 return (read_queue_e);
/*
 * Build an mbuf chain of ancillary (cmsg) data for recvmsg(), according to
 * which notification features the endpoint has enabled: SCTP_RCVINFO,
 * SCTP_NXTINFO, and the legacy SCTP_SNDRCV / SCTP_EXTRCV.  Returns NULL
 * when the user wants no ancillary data (and, presumably, on allocation
 * failure — those paths are elided in this view).
 */
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
166 struct sctp_extrcvinfo *seinfo;
167 struct sctp_sndrcvinfo *outinfo;
168 struct sctp_rcvinfo *rcvinfo;
169 struct sctp_nxtinfo *nxtinfo;
/* Fast path: none of the three ancillary-data features is on. */
176 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 /* user does not want any ancillary data */
/* First pass: size the buffer (len accumulates CMSG_SPACE of each cmsg). */
183 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
186 seinfo = (struct sctp_extrcvinfo *)sinfo;
/* NXTINFO is only emitted when info about a next message is available. */
187 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
190 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
194 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
197 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
206 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 SCTP_BUF_LEN(ret) = 0;
213 /* We need a CMSG header followed by the struct */
214 cmh = mtod(ret, struct cmsghdr *);
216 * Make sure that there is no un-initialized padding between the
217 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: fill in each enabled cmsg, advancing cmh by CMSG_SPACE. */
220 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 cmh->cmsg_level = IPPROTO_SCTP;
222 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 cmh->cmsg_type = SCTP_RCVINFO;
224 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 rcvinfo->rcv_context = sinfo->sinfo_context;
232 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* NXTINFO cmsg: describes the next queued message (guard elided here). */
237 cmh->cmsg_level = IPPROTO_SCTP;
238 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 cmh->cmsg_type = SCTP_NXTINFO;
240 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 nxtinfo->nxt_flags = 0;
243 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 nxtinfo->nxt_flags |= SCTP_UNORDERED;
246 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 nxtinfo->nxt_flags |= SCTP_COMPLETE;
252 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy SNDRCV/EXTRCV cmsg for applications using the old API. */
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
262 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 cmh->cmsg_type = SCTP_EXTRCV;
264 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 cmh->cmsg_type = SCTP_SNDRCV;
270 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a TSN from the (revokable) mapping_array to the nr_mapping_array,
 * marking it non-renegable for SACK purposes.  If the TSN was the highest
 * in the old map, walk backwards to find the new highest.  No-op when
 * sctp_do_drain is disabled or the TSN is at/behind the cumulative ack.
 */
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
280 uint32_t gap, i, cumackp1;
282 int in_r = 0, in_nr = 0;
284 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 cumackp1 = asoc->cumulative_tsn + 1;
288 if (SCTP_TSN_GT(cumackp1, tsn)) {
290 * this tsn is behind the cum ack and thus we don't need to
291 * worry about it being moved from one to the other.
295 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* TSN in neither map: internal inconsistency (panic under INVARIANTS). */
298 if ((in_r == 0) && (in_nr == 0)) {
300 panic("Things are really messed up now");
302 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 sctp_print_mapping_array(asoc);
307 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 asoc->highest_tsn_inside_nr_map = tsn;
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
/* Nothing left in the map: highest drops below the base TSN. */
324 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert a control entry into its stream's inbound queue (unordered or
 * ordered), keeping the queue sorted by MID.  Presumably returns 0 on
 * success and non-zero on a duplicate MID (callers abort the association
 * on failure) — the return statements are elided in this view.
 */
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t flags, unordered;
/* Chunk flags live in the upper byte of sinfo_flags. */
338 flags = (control->sinfo_flags >> 8);
339 unordered = flags & SCTP_DATA_UNORDERED;
341 q = &strm->uno_inqueue;
/* Old (non I-DATA) unordered: at most one entry sits on this queue. */
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
344 /* Only one stream can be here in old style
348 TAILQ_INSERT_TAIL(q, control, next_instrm);
349 control->on_strm_q = SCTP_ON_UNORDERED;
/* A complete (unfragmented) message needs no reassembly bookkeeping. */
355 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
356 control->end_added = 1;
357 control->first_frag_seen = 1;
358 control->last_frag_seen = 1;
360 if (TAILQ_EMPTY(q)) {
362 TAILQ_INSERT_HEAD(q, control, next_instrm);
364 control->on_strm_q = SCTP_ON_UNORDERED;
366 control->on_strm_q = SCTP_ON_ORDERED;
/* Walk the queue to keep it MID-sorted; insert before first larger MID. */
370 TAILQ_FOREACH(at, q, next_instrm) {
371 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
373 * one in queue is bigger than the new one,
374 * insert before this one
376 TAILQ_INSERT_BEFORE(at, control, next_instrm);
378 control->on_strm_q = SCTP_ON_UNORDERED;
380 control->on_strm_q = SCTP_ON_ORDERED;
383 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
385 * Gak, He sent me a duplicate msg id
386 * number?? return -1 to abort.
390 if (TAILQ_NEXT(at, next_instrm) == NULL) {
392 * We are at the end, insert it
395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 sctp_log_strm_del(control, at,
397 SCTP_STR_LOG_FROM_INSERT_TL);
399 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
401 control->on_strm_q = SCTP_ON_UNORDERED;
403 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association due to an unrecoverable reassembly error:
 * build a diagnostic PROTOCOL_VIOLATION cause string (I-DATA vs. old
 * format), free the offending chunk, and tear the association down.
 * 'opspot' identifies the call site (SCTP_LOC_* code).
 */
414 sctp_abort_in_reasm(struct sctp_tcb *stcb,
415 struct sctp_queued_to_read *control,
416 struct sctp_tmit_chunk *chk,
417 int *abort_flag, int opspot)
419 char msg[SCTP_DIAG_INFO_LEN];
/* Message layout differs: I-DATA carries 32-bit FSN/MID, old style 16-bit. */
422 if (stcb->asoc.idata_supported) {
423 snprintf(msg, sizeof(msg),
424 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
426 control->fsn_included,
429 chk->rec.data.fsn, chk->rec.data.mid);
431 snprintf(msg, sizeof(msg),
432 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
434 control->fsn_included,
438 (uint16_t) chk->rec.data.mid);
440 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
441 sctp_m_freem(chk->data);
443 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
444 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
445 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Discard a control entry that could not be placed: free every chunk
 * still on its reassembly queue, then release the entry itself.
 */
450 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
453 * The control could not be placed and must be cleaned.
455 struct sctp_tmit_chunk *chk, *nchk;
457 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
458 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
460 sctp_m_freem(chk->data);
462 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
464 sctp_free_a_readq(stcb, control);
468 * Queue the chunk either right into the socket buffer if it is the next one
469 * to go OR put it in the correct place in the delivery queue. If we do
470 * append to the so_buf, keep doing so until we are out of order as
471 * long as the control's entered are non-fragmented.
/*
 * Deliver an ordered message either directly to the socket read queue
 * (when its MID is next-to-deliver) or slot it into the stream's sorted
 * inbound queue.  Sets *abort_flag on protocol violations (stale MID,
 * duplicate MID).  Many guard/return lines are elided in this view.
 */
474 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
475 struct sctp_stream_in *strm,
476 struct sctp_association *asoc,
477 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
480 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
481 * all the data in one stream this could happen quite rapidly. One
482 * could use the TSN to keep track of things, but this scheme breaks
483 * down in the other type of stream usage that could occur. Send a
484 * single msg to stream 0, send 4Billion messages to stream 1, now
485 * send a message to stream 0. You have a situation where the TSN
486 * has wrapped but not in the stream. Is this worth worrying about
487 * or should we just change our queue sort at the bottom to be by
490 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
491 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
492 * assignment this could happen... and I don't see how this would be
493 * a violation. So for now I am undecided an will leave the sort by
494 * SSN alone. Maybe a hybred approach is the answer
497 struct sctp_queued_to_read *at;
501 char msg[SCTP_DIAG_INFO_LEN];
503 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
504 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* Peer re-sent an MID we already delivered: protocol violation, abort. */
506 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
507 /* The incoming sseq is behind where we last delivered? */
508 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
509 control->mid, strm->last_mid_delivered);
512 * throw it in the stream so it gets cleaned up in
513 * association destruction
515 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
516 if (asoc->idata_supported) {
517 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
518 strm->last_mid_delivered, control->sinfo_tsn,
519 control->sinfo_stream, control->mid);
521 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
522 (uint16_t) strm->last_mid_delivered,
524 control->sinfo_stream,
525 (uint16_t) control->mid);
527 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
528 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
529 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
534 if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
538 asoc->size_on_all_streams += control->length;
539 sctp_ucount_incr(asoc->cnt_on_all_streams);
540 nxt_todel = strm->last_mid_delivered + 1;
/* Fast path: this is exactly the next MID — deliver straight to socket. */
541 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
542 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock dance: drop TCB lock, take socket lock, re-check state. */
545 so = SCTP_INP_SO(stcb->sctp_ep);
546 atomic_add_int(&stcb->asoc.refcnt, 1);
547 SCTP_TCB_UNLOCK(stcb);
548 SCTP_SOCKET_LOCK(so, 1);
550 atomic_subtract_int(&stcb->asoc.refcnt, 1);
551 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
552 SCTP_SOCKET_UNLOCK(so, 1);
556 /* can be delivered right away? */
557 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
558 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
560 /* EY it wont be queued if it could be delivered directly */
562 asoc->size_on_all_streams -= control->length;
563 sctp_ucount_decr(asoc->cnt_on_all_streams);
564 strm->last_mid_delivered++;
565 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
566 sctp_add_to_readq(stcb->sctp_ep, stcb,
568 &stcb->sctp_socket->so_rcv, 1,
569 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Drain any now-in-order, unfragmented entries queued behind this one. */
570 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
572 nxt_todel = strm->last_mid_delivered + 1;
573 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
574 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
575 asoc->size_on_all_streams -= control->length;
576 sctp_ucount_decr(asoc->cnt_on_all_streams);
577 if (control->on_strm_q == SCTP_ON_ORDERED) {
578 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
581 panic("Huh control: %p is on_strm_q: %d",
582 control, control->on_strm_q);
585 control->on_strm_q = 0;
586 strm->last_mid_delivered++;
588 * We ignore the return of deliver_data here
589 * since we always can hold the chunk on the
590 * d-queue. And we have a finite number that
591 * can be delivered from the strq.
593 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
594 sctp_log_strm_del(control, NULL,
595 SCTP_STR_LOG_FROM_IMMED_DEL);
597 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
598 sctp_add_to_readq(stcb->sctp_ep, stcb,
600 &stcb->sctp_socket->so_rcv, 1,
601 SCTP_READ_LOCK_NOT_HELD,
604 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
609 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
610 SCTP_SOCKET_UNLOCK(so, 1);
615 * Ok, we did not deliver this guy, find the correct place
616 * to put it on the queue.
/* Out of order: insert into the sorted stream queue; abort on dup MID. */
618 if (sctp_place_control_in_stream(strm, asoc, control)) {
619 snprintf(msg, sizeof(msg),
620 "Queue to str MID: %u duplicate",
622 sctp_clean_up_control(stcb, control);
623 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
624 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
625 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk a control's mbuf chain: prune zero-length mbufs, recompute
 * control->length, set control->tail_mbuf to the last mbuf, and — when
 * the control is already on the read queue — account each mbuf against
 * the socket receive buffer.  Loop framing is partly elided here.
 */
633 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
635 struct mbuf *m, *prev = NULL;
636 struct sctp_tcb *stcb;
638 stcb = control->stcb;
639 control->held_length = 0;
643 if (SCTP_BUF_LEN(m) == 0) {
644 /* Skip mbufs with NO length */
/* Empty mbuf at the head: free it and restart from its successor. */
647 control->data = sctp_m_free(m);
/* Empty mbuf mid-chain: unlink via prev and continue. */
650 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
651 m = SCTP_BUF_NEXT(prev);
654 control->tail_mbuf = prev;
659 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
660 if (control->on_read_q) {
662 * On read queue so we must increment the SB stuff,
663 * we assume caller has done any locks of SB.
665 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
667 m = SCTP_BUF_NEXT(m);
670 control->tail_mbuf = prev;
/*
 * Append mbuf chain 'm' to a control's existing data chain, pruning
 * zero-length mbufs, updating control->length / tail_mbuf, and charging
 * the socket receive buffer when the control is on the read queue.
 * Falls back to sctp_setup_tail_pointer() when no tail exists yet.
 */
675 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
677 struct mbuf *prev = NULL;
678 struct sctp_tcb *stcb;
680 stcb = control->stcb;
/* A control without an stcb is corrupt (panic under INVARIANTS). */
683 panic("Control broken");
688 if (control->tail_mbuf == NULL) {
691 sctp_setup_tail_pointer(control);
694 control->tail_mbuf->m_next = m;
696 if (SCTP_BUF_LEN(m) == 0) {
697 /* Skip mbufs with NO length */
/* Empty mbuf right after the old tail: free and relink from tail. */
700 control->tail_mbuf->m_next = sctp_m_free(m);
701 m = control->tail_mbuf->m_next;
703 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
704 m = SCTP_BUF_NEXT(prev);
707 control->tail_mbuf = prev;
712 if (control->on_read_q) {
714 * On read queue so we must increment the SB stuff,
715 * we assume caller has done any locks of SB.
717 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
719 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
720 m = SCTP_BUF_NEXT(m);
723 control->tail_mbuf = prev;
/*
 * Clone the identifying fields of 'control' into a fresh entry 'nc'
 * (zeroed first).  Data/reassembly state is NOT copied: fsn_included is
 * reset to the 0xffffffff "nothing included" sentinel and the reasm
 * queue starts empty.  Takes a new reference on the source net.
 */
728 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
730 memset(nc, 0, sizeof(struct sctp_queued_to_read));
731 nc->sinfo_stream = control->sinfo_stream;
732 nc->mid = control->mid;
733 TAILQ_INIT(&nc->reasm);
734 nc->top_fsn = control->top_fsn;
/* mid assigned again here (harmless duplicate of line 732 above). */
735 nc->mid = control->mid;
736 nc->sinfo_flags = control->sinfo_flags;
737 nc->sinfo_ppid = control->sinfo_ppid;
738 nc->sinfo_context = control->sinfo_context;
739 nc->fsn_included = 0xffffffff;
740 nc->sinfo_tsn = control->sinfo_tsn;
741 nc->sinfo_cumtsn = control->sinfo_cumtsn;
742 nc->sinfo_assoc_id = control->sinfo_assoc_id;
743 nc->whoFrom = control->whoFrom;
744 atomic_add_int(&nc->whoFrom->ref_count, 1);
745 nc->stcb = control->stcb;
746 nc->port_from = control->port_from;
/*
 * Reset a control's included-FSN to 'tsn' and, if it is currently on the
 * endpoint's read queue, unlink it from there.
 */
750 sctp_reset_a_control(struct sctp_queued_to_read *control,
751 struct sctp_inpcb *inp, uint32_t tsn)
753 control->fsn_included = tsn;
754 if (control->on_read_q) {
756 * We have to purge it from there, hopefully this will work
759 TAILQ_REMOVE(&inp->read_queue, control, next);
760 control->on_read_q = 0;
/*
 * Deliver old-style (non I-DATA) unordered data: all such fragments share
 * MID 0, so reassembly is done by consecutive FSN collapse.  When the
 * current message completes, leftover fragments are migrated to a new
 * control entry so the next message can start.  Return value semantics
 * (1 = stop scanning this queue) — return statements elided in this view.
 */
765 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
766 struct sctp_association *asoc,
767 struct sctp_stream_in *strm,
768 struct sctp_queued_to_read *control,
770 int inp_read_lock_held)
773 * Special handling for the old un-ordered data chunk. All the
774 * chunks/TSN's go to mid 0. So we have to do the old style watching
775 * to see if we have it all. If you return one, no other control
776 * entries on the un-ordered queue will be looked at. In theory
777 * there should be no others entries in reality, unless the guy is
778 * sending both unordered NDATA and unordered DATA...
780 struct sctp_tmit_chunk *chk, *lchk, *tchk;
782 struct sctp_queued_to_read *nc;
785 if (control->first_frag_seen == 0) {
786 /* Nothing we can do, we have not seen the first piece yet */
789 /* Collapse any we can */
792 fsn = control->fsn_included + 1;
793 /* Now what can we add? */
/* Merge every chunk whose FSN is the next expected one. */
794 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
795 if (chk->rec.data.fsn == fsn) {
797 sctp_alloc_a_readq(stcb, nc);
801 memset(nc, 0, sizeof(struct sctp_queued_to_read));
802 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
803 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
807 if (control->end_added) {
809 if (!TAILQ_EMPTY(&control->reasm)) {
811 * Ok we have to move anything left
812 * on the control queue to a new
815 sctp_build_readq_entry_from_ctl(nc, control);
816 tchk = TAILQ_FIRST(&control->reasm);
/* If the leftover starts a new message, seed nc with its first frag. */
817 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
818 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
819 asoc->size_on_reasm_queue -= tchk->send_size;
820 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
821 nc->first_frag_seen = 1;
822 nc->fsn_included = tchk->rec.data.fsn;
823 nc->data = tchk->data;
824 nc->sinfo_ppid = tchk->rec.data.ppid;
825 nc->sinfo_tsn = tchk->rec.data.tsn;
826 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
828 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
829 sctp_setup_tail_pointer(nc);
830 tchk = TAILQ_FIRST(&control->reasm);
832 /* Spin the rest onto the queue */
834 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
835 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
836 tchk = TAILQ_FIRST(&control->reasm);
838 /* Now lets add it to the queue
839 * after removing control */
840 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
841 nc->on_strm_q = SCTP_ON_UNORDERED;
842 if (control->on_strm_q) {
843 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
844 control->on_strm_q = 0;
847 if (control->pdapi_started) {
848 strm->pd_api_started = 0;
849 control->pdapi_started = 0;
851 if (control->on_strm_q) {
852 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
853 control->on_strm_q = 0;
854 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
856 if (control->on_read_q == 0) {
857 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
858 &stcb->sctp_socket->so_rcv, control->end_added,
859 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
861 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
862 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
863 /* Switch to the new guy and
/* nc unused (not queued): release it back to the pool. */
868 if (nc->on_strm_q == 0) {
869 sctp_free_a_readq(stcb, nc);
874 sctp_free_a_readq(stcb, nc);
/* Message incomplete but large: start partial delivery (PD-API). */
881 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
882 strm->pd_api_started = 1;
883 control->pdapi_started = 1;
884 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
885 &stcb->sctp_socket->so_rcv, control->end_added,
886 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
887 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Insert a chunk into a control entry for old-style unordered data,
 * keeping the reassembly list FSN-sorted.  A FIRST_FRAG chunk may become
 * the control's own data (or swap with it if an earlier first arrives);
 * duplicates and PD-API conflicts abort via sctp_abort_in_reasm().
 * Several branch/return lines are elided in this view.
 */
895 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
896 struct sctp_association *asoc,
897 struct sctp_queued_to_read *control,
898 struct sctp_tmit_chunk *chk,
901 struct sctp_tmit_chunk *at;
905 * Here we need to place the chunk into the control structure sorted
906 * in the correct order.
908 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
909 /* Its the very first one. */
910 SCTPDBG(SCTP_DEBUG_XXX,
911 "chunk is a first fsn: %u becomes fsn_included\n",
913 if (control->first_frag_seen) {
915 * In old un-ordered we can reassembly on one
916 * control multiple messages. As long as the next
917 * FIRST is greater then the old first (TSN i.e. FSN
923 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
924 /* Easy way the start of a new guy beyond
/* Equal FSN or PD-API already running: unrecoverable — abort. */
928 if ((chk->rec.data.fsn == control->fsn_included) ||
929 (control->pdapi_started)) {
931 * Ok this should not happen, if it does we
932 * started the pd-api on the higher TSN
933 * (since the equals part is a TSN failure
936 * We are completly hosed in that case since
937 * I have no way to recover. This really
938 * will only happen if we can get more TSN's
939 * higher before the pd-api-point.
941 sctp_abort_in_reasm(stcb, control, chk,
943 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
948 * Ok we have two firsts and the one we just got is
949 * smaller than the one we previously placed.. yuck!
950 * We must swap them out.
/* Swap data, FSN, TSN and PPID between control and chk. */
953 tdata = control->data;
954 control->data = chk->data;
956 /* Save the lengths */
957 chk->send_size = control->length;
958 /* Recompute length of control and tail pointer */
959 sctp_setup_tail_pointer(control);
960 /* Fix the FSN included */
961 tmp = control->fsn_included;
962 control->fsn_included = chk->rec.data.fsn;
963 chk->rec.data.fsn = tmp;
964 /* Fix the TSN included */
965 tmp = control->sinfo_tsn;
966 control->sinfo_tsn = chk->rec.data.tsn;
967 chk->rec.data.tsn = tmp;
968 /* Fix the PPID included */
969 tmp = control->sinfo_ppid;
970 control->sinfo_ppid = chk->rec.data.ppid;
971 chk->rec.data.ppid = tmp;
972 /* Fix tail pointer */
/* First-ever first-fragment: adopt the chunk as the control's data. */
975 control->first_frag_seen = 1;
976 control->fsn_included = chk->rec.data.fsn;
977 control->top_fsn = chk->rec.data.fsn;
978 control->sinfo_tsn = chk->rec.data.tsn;
979 control->sinfo_ppid = chk->rec.data.ppid;
980 control->data = chk->data;
981 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
983 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
984 sctp_setup_tail_pointer(control);
/* Non-first fragment: insert into the FSN-sorted reassembly list. */
989 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
990 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
992 * This one in queue is bigger than the new one,
993 * insert the new one before at.
995 asoc->size_on_reasm_queue += chk->send_size;
996 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
998 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1000 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1002 * They sent a duplicate fsn number. This really
1003 * should not happen since the FSN is a TSN and it
1004 * should have been dropped earlier.
1006 sctp_abort_in_reasm(stcb, control, chk,
1008 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1012 if (inserted == 0) {
1013 /* Its at the end */
1014 asoc->size_on_reasm_queue += chk->send_size;
1015 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1016 control->top_fsn = chk->rec.data.fsn;
1017 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * Scan a stream's unordered and ordered queues for reassembled messages
 * that are ready, moving complete ones to the socket read queue and
 * starting partial delivery (PD-API) for large incomplete ones.
 * pd_point is the threshold for starting partial delivery.  Loop framing
 * and several break/return lines are elided in this view.
 */
1022 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1023 struct sctp_stream_in *strm, int inp_read_lock_held)
1026 * Given a stream, strm, see if any of the SSN's on it that are
1027 * fragmented are ready to deliver. If so go ahead and place them on
1028 * the read queue. In so placing if we have hit the end, then we
1029 * need to remove them from the stream's queue.
1031 struct sctp_queued_to_read *control, *nctl = NULL;
1032 uint32_t next_to_del;
/* PD threshold: fraction of the receive buffer, capped by the ep limit. */
1036 if (stcb->sctp_socket) {
1037 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1038 stcb->sctp_ep->partial_delivery_point);
1040 pd_point = stcb->sctp_ep->partial_delivery_point;
/* --- Pass 1: unordered queue --- */
1042 control = TAILQ_FIRST(&strm->uno_inqueue);
1045 (asoc->idata_supported == 0)) {
1046 /* Special handling needed for "old" data format */
1047 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1051 if (strm->pd_api_started) {
1052 /* Can't add more */
1056 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1057 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1058 nctl = TAILQ_NEXT(control, next_instrm);
1059 if (control->end_added) {
1060 /* We just put the last bit on */
1061 if (control->on_strm_q) {
1063 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1064 panic("Huh control: %p on_q: %d -- not unordered?",
1065 control, control->on_strm_q);
1068 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1069 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1070 control->on_strm_q = 0;
1072 if (control->on_read_q == 0) {
1073 sctp_add_to_readq(stcb->sctp_ep, stcb,
1075 &stcb->sctp_socket->so_rcv, control->end_added,
1076 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1079 /* Can we do a PD-API for this un-ordered guy? */
1080 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1081 strm->pd_api_started = 1;
1082 control->pdapi_started = 1;
1083 sctp_add_to_readq(stcb->sctp_ep, stcb,
1085 &stcb->sctp_socket->so_rcv, control->end_added,
1086 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
/* --- Pass 2: ordered queue --- */
1094 control = TAILQ_FIRST(&strm->inqueue);
1095 if (strm->pd_api_started) {
1096 /* Can't add more */
1099 if (control == NULL) {
/* Head entry was the one being partially delivered — finish it. */
1102 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1104 * Ok the guy at the top was being partially delivered
1105 * completed, so we remove it. Note the pd_api flag was
1106 * taken off when the chunk was merged on in
1107 * sctp_queue_data_for_reasm below.
1109 nctl = TAILQ_NEXT(control, next_instrm);
1110 SCTPDBG(SCTP_DEBUG_XXX,
1111 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1112 control, control->end_added, control->mid,
1113 control->top_fsn, control->fsn_included,
1114 strm->last_mid_delivered);
1115 if (control->end_added) {
1116 if (control->on_strm_q) {
1118 if (control->on_strm_q != SCTP_ON_ORDERED) {
1119 panic("Huh control: %p on_q: %d -- not ordered?",
1120 control, control->on_strm_q);
1123 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1124 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1125 control->on_strm_q = 0;
1127 if (strm->pd_api_started && control->pdapi_started) {
1128 control->pdapi_started = 0;
1129 strm->pd_api_started = 0;
1131 if (control->on_read_q == 0) {
1132 sctp_add_to_readq(stcb->sctp_ep, stcb,
1134 &stcb->sctp_socket->so_rcv, control->end_added,
1135 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1140 if (strm->pd_api_started) {
1141 /* Can't add more must have gotten an un-ordered above being
1142 * partially delivered. */
/* Deliver consecutive MIDs starting at last_mid_delivered + 1. */
1146 next_to_del = strm->last_mid_delivered + 1;
1148 SCTPDBG(SCTP_DEBUG_XXX,
1149 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1150 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1152 nctl = TAILQ_NEXT(control, next_instrm);
1153 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1154 (control->first_frag_seen)) {
1157 /* Ok we can deliver it onto the stream. */
1158 if (control->end_added) {
1159 /* We are done with it afterwards */
1160 if (control->on_strm_q) {
1162 if (control->on_strm_q != SCTP_ON_ORDERED) {
1163 panic("Huh control: %p on_q: %d -- not ordered?",
1164 control, control->on_strm_q);
1167 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1168 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1169 control->on_strm_q = 0;
1173 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1174 /* A singleton now slipping through - mark
1175 * it non-revokable too */
1176 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1177 } else if (control->end_added == 0) {
1178 /* Check if we can defer adding until its
1180 if ((control->length < pd_point) || (strm->pd_api_started)) {
1181 /* Don't need it or cannot add more
1182 * (one being delivered that way) */
1186 done = (control->end_added) && (control->last_frag_seen);
1187 if (control->on_read_q == 0) {
1188 sctp_add_to_readq(stcb->sctp_ep, stcb,
1190 &stcb->sctp_socket->so_rcv, control->end_added,
1191 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1193 strm->last_mid_delivered = next_to_del;
1198 /* We are now doing PD API */
1199 strm->pd_api_started = 1;
1200 control->pdapi_started = 1;
1210 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1211 struct sctp_stream_in *strm,
1212 struct sctp_tcb *stcb, struct sctp_association *asoc,
1213 struct sctp_tmit_chunk *chk, int hold_rlock)
1216 * Given a control and a chunk, merge the data from the chk onto the
1217 * control and free up the chunk resources.
/*
 * Ownership: chk->data is appended to control->data and the chunk itself is
 * released via sctp_free_a_chunk() before return, so the caller must not
 * touch chk afterwards.  hold_rlock tells us whether the caller already
 * holds the INP read lock; if not, and the control is visible on the read
 * queue, we take it ourselves for the duration of the merge.
 */
1221 if (control->on_read_q && (hold_rlock == 0)) {
1223 * Its being pd-api'd so we must do some locks.
1225 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First fragment into an empty control becomes the head of the chain. */
1228 if (control->data == NULL) {
1229 control->data = chk->data;
1230 sctp_setup_tail_pointer(control);
1232 sctp_add_to_tail_pointer(control, chk->data);
/*
 * This fragment now lives in the control, so pull its accounting off the
 * reassembly queue and record it as the highest FSN merged in.
 */
1234 control->fsn_included = chk->rec.data.fsn;
1235 asoc->size_on_reasm_queue -= chk->send_size;
1236 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1237 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* First fragment carries the message's TSN/PPID metadata. */
1239 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1240 control->first_frag_seen = 1;
1241 control->sinfo_tsn = chk->rec.data.tsn;
1242 control->sinfo_ppid = chk->rec.data.ppid;
1244 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Last fragment: the message is complete.  If the control sits on both a
 * stream queue and the read queue, terminate any partial-delivery (PD-API)
 * state and unhook it from whichever stream queue holds it.
 */
1246 if ((control->on_strm_q) && (control->on_read_q)) {
1247 if (control->pdapi_started) {
1248 control->pdapi_started = 0;
1249 strm->pd_api_started = 0;
1251 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1253 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1254 control->on_strm_q = 0;
1255 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1257 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1258 control->on_strm_q = 0;
/* Any other non-zero queue tag is a state-machine corruption. */
1260 } else if (control->on_strm_q) {
1261 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1262 control->on_strm_q);
1266 control->end_added = 1;
1267 control->last_frag_seen = 1;
1270 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1272 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1276 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1277 * queue, see if anything can be delivered. If so pull it off (or as much
1278 * as we can). If we run out of space then we must dump what we can and set the
1279 * appropriate flag to say we queued what we could.
1282 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1283 struct sctp_stream_in *strm,
1284 struct sctp_queued_to_read *control,
1285 struct sctp_tmit_chunk *chk,
1286 int created_control,
1287 int *abort_flag, uint32_t tsn)
/*
 * Insert chk into the reassembly state for this control, validating FSN
 * ordering against what has already been merged/delivered.  On any protocol
 * violation the sctp_abort_in_reasm() paths set *abort_flag.  created_control
 * is non-zero when the caller just allocated this control, in which case it
 * must first be placed on the stream-in queue here.
 */
1290 struct sctp_tmit_chunk *at, *nat;
1291 int do_wakeup, unordered;
1294 * For old un-ordered data chunks.
1296 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1301 /* Must be added to the stream-in queue */
1302 if (created_control) {
1303 if (sctp_place_control_in_stream(strm, asoc, control)) {
1304 /* Duplicate SSN? */
1305 sctp_clean_up_control(stcb, control);
1306 sctp_abort_in_reasm(stcb, control, chk,
1308 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
/*
 * Old-style DATA only: if this TSN is exactly the next expected one, a
 * fresh reassembly must begin with a B-bit (first-fragment) chunk.
 */
1311 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1313 * Ok we created this control and now lets validate
1314 * that its legal i.e. there is a B bit set, if not
1315 * and we have up to the cum-ack then its invalid.
1317 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1318 sctp_abort_in_reasm(stcb, control, chk,
1320 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Legacy (non-I-DATA) unordered fragments take a special injection path. */
1325 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1326 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1330 * Ok we must queue the chunk into the reassembly portion: o if its
1331 * the first it goes to the control mbuf. o if its not first but the
1332 * next in sequence it goes to the control, and each succeeding one
1333 * in order also goes. o if its not in order we place it on the list
1336 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1337 /* Its the very first one. */
1338 SCTPDBG(SCTP_DEBUG_XXX,
1339 "chunk is a first fsn: %u becomes fsn_included\n",
/* A second FIRST-flagged fragment for the same control is a sender bug. */
1341 if (control->first_frag_seen) {
1343 * Error on senders part, they either sent us two
1344 * data chunks with FIRST, or they sent two
1345 * un-ordered chunks that were fragmented at the
1346 * same time in the same stream.
1348 sctp_abort_in_reasm(stcb, control, chk,
1350 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* First fragment: its data becomes the control head; the chunk is freed. */
1353 control->first_frag_seen = 1;
1354 control->sinfo_ppid = chk->rec.data.ppid;
1355 control->sinfo_tsn = chk->rec.data.tsn;
1356 control->fsn_included = chk->rec.data.fsn;
1357 control->data = chk->data;
1358 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1360 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1361 sctp_setup_tail_pointer(control);
1363 /* Place the chunk in our list */
/* Not the first fragment: validate against top_fsn / fsn_included. */
1366 if (control->last_frag_seen == 0) {
1367 /* Still willing to raise highest FSN seen */
1368 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1369 SCTPDBG(SCTP_DEBUG_XXX,
1370 "We have a new top_fsn: %u\n",
1372 control->top_fsn = chk->rec.data.fsn;
1374 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1375 SCTPDBG(SCTP_DEBUG_XXX,
1376 "The last fsn is now in place fsn: %u\n",
1378 control->last_frag_seen = 1;
1380 if (asoc->idata_supported || control->first_frag_seen) {
1382 * For IDATA we always check since we know
1383 * that the first fragment is 0. For old
1384 * DATA we have to receive the first before
1385 * we know the first FSN (which is the TSN).
1387 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1388 /* We have already delivered up to
1389 * this so its a dup */
1390 sctp_abort_in_reasm(stcb, control, chk,
1392 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/* Last fragment already recorded: a second LAST flag is a violation. */
1397 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1398 /* Second last? huh? */
1399 SCTPDBG(SCTP_DEBUG_XXX,
1400 "Duplicate last fsn: %u (top: %u) -- abort\n",
1401 chk->rec.data.fsn, control->top_fsn);
1402 sctp_abort_in_reasm(stcb, control,
1404 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1407 if (asoc->idata_supported || control->first_frag_seen) {
1409 * For IDATA we always check since we know
1410 * that the first fragment is 0. For old
1411 * DATA we have to receive the first before
1412 * we know the first FSN (which is the TSN).
1415 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1416 /* We have already delivered up to
1417 * this so its a dup */
1418 SCTPDBG(SCTP_DEBUG_XXX,
1419 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1420 chk->rec.data.fsn, control->fsn_included);
1421 sctp_abort_in_reasm(stcb, control, chk,
1423 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1427 /* validate not beyond top FSN if we have seen last
1429 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1430 SCTPDBG(SCTP_DEBUG_XXX,
1431 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1434 sctp_abort_in_reasm(stcb, control, chk,
1436 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1441 * If we reach here, we need to place the new chunk in the
1442 * reassembly for this control.
1444 SCTPDBG(SCTP_DEBUG_XXX,
1445 "chunk is a not first fsn: %u needs to be inserted\n",
/* Insert in FSN order into control->reasm; duplicates abort. */
1447 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1448 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1450 * This one in queue is bigger than the new
1451 * one, insert the new one before at.
1453 SCTPDBG(SCTP_DEBUG_XXX,
1454 "Insert it before fsn: %u\n",
1456 asoc->size_on_reasm_queue += chk->send_size;
1457 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1458 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1461 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1462 /* Gak, He sent me a duplicate str seq
1465 * foo bar, I guess I will just free this
1466 * new guy, should we abort too? FIX ME
1467 * MAYBE? Or it COULD be that the SSN's have
1468 * wrapped. Maybe I should compare to TSN
1469 * somehow... sigh for now just blow away
1472 SCTPDBG(SCTP_DEBUG_XXX,
1473 "Duplicate to fsn: %u -- abort\n",
1475 sctp_abort_in_reasm(stcb, control,
1477 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1481 if (inserted == 0) {
1482 /* Goes on the end */
1483 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1485 asoc->size_on_reasm_queue += chk->send_size;
1486 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1487 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1491 * Ok lets see if we can suck any up into the control structure that
1492 * are in seq if it makes sense.
1496 * If the first fragment has not been seen there is no sense in
/* Merge any now-in-sequence fragments from the reasm list into control. */
1499 if (control->first_frag_seen) {
1500 next_fsn = control->fsn_included + 1;
1501 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1502 if (at->rec.data.fsn == next_fsn) {
1503 /* We can add this one now to the control */
1504 SCTPDBG(SCTP_DEBUG_XXX,
1505 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1508 next_fsn, control->fsn_included);
1509 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1510 sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1511 if (control->on_read_q) {
/* Message completed while being partially delivered: end PD-API state. */
1515 if (control->end_added && control->pdapi_started) {
1516 if (strm->pd_api_started) {
1517 strm->pd_api_started = 0;
1518 control->pdapi_started = 0;
1520 if (control->on_read_q == 0) {
1521 sctp_add_to_readq(stcb->sctp_ep, stcb,
1523 &stcb->sctp_socket->so_rcv, control->end_added,
1524 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1535 /* Need to wakeup the reader */
1536 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Look up the in-progress reassembly control for message id 'mid' on this
 * stream.  The ordered in-queue is searched by MID; with I-DATA support the
 * unordered queue is searched by MID as well, otherwise the head of the
 * unordered queue is taken (old DATA has no per-message id for unordered
 * fragments).  NOTE(review): some guard lines are elided in this listing --
 * the exact ordered/unordered branch structure should be confirmed against
 * the full file.
 */
1540 static struct sctp_queued_to_read *
1541 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1543 struct sctp_queued_to_read *control;
1546 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1547 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1552 if (idata_supported) {
1553 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1554 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
/* Legacy DATA: at most one unordered reassembly is active per stream. */
1559 control = TAILQ_FIRST(&strm->uno_inqueue);
/*
 * Process one incoming DATA or I-DATA chunk: parse the header, reject
 * duplicates and protocol violations, update the (nr_)mapping arrays, and
 * either express-deliver the payload to the socket, queue it on the stream
 * for reordering, or hand it to the reassembly machinery.  Sets *abort_flag
 * on association-fatal errors and *break_flag when the caller should stop
 * processing further chunks of this packet.
 */
1566 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1567 struct mbuf **m, int offset, int chk_length,
1568 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1569 int *break_flag, int last_chunk, uint8_t chk_type)
1571 /* Process a data chunk */
1572 /* struct sctp_tmit_chunk *chk; */
1573 struct sctp_tmit_chunk *chk;
1574 uint32_t tsn, fsn, gap, mid;
1577 int need_reasm_check = 0;
1579 struct mbuf *op_err;
1580 char msg[SCTP_DIAG_INFO_LEN];
1581 struct sctp_queued_to_read *control = NULL;
1584 struct sctp_stream_reset_list *liste;
1585 struct sctp_stream_in *strm;
1588 int created_control = 0;
/* Parse the chunk header; I-DATA carries a 32-bit MID and a PPID/FSN union. */
1590 if (chk_type == SCTP_IDATA) {
1591 struct sctp_idata_chunk *chunk, chunk_buf;
1593 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1594 sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1595 chk_flags = chunk->ch.chunk_flags;
1596 clen = sizeof(struct sctp_idata_chunk);
1597 tsn = ntohl(chunk->dp.tsn);
1598 sid = ntohs(chunk->dp.sid);
1599 mid = ntohl(chunk->dp.mid);
/* First fragment: the union holds the PPID; otherwise it holds the FSN. */
1600 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1602 ppid = chunk->dp.ppid_fsn.ppid;
1604 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1605 ppid = 0xffffffff; /* Use as an invalid value. */
1608 struct sctp_data_chunk *chunk, chunk_buf;
1610 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1611 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1612 chk_flags = chunk->ch.chunk_flags;
1613 clen = sizeof(struct sctp_data_chunk);
1614 tsn = ntohl(chunk->dp.tsn);
1615 sid = ntohs(chunk->dp.sid);
/* Old DATA: the 16-bit SSN is widened and used as the message id. */
1616 mid = (uint32_t) (ntohs(chunk->dp.ssn));
1618 ppid = chunk->dp.ppid;
/* A data chunk with no user data is a protocol violation -> ABORT. */
1620 if ((size_t)chk_length == clen) {
1622 * Need to send an abort since we had a empty data chunk.
1624 op_err = sctp_generate_no_user_data_cause(tsn);
1625 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1626 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1630 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1631 asoc->send_sack = 1;
1633 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1634 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1635 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1640 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
/* TSN at or below cum-ack: pure duplicate; record it for the next SACK. */
1641 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1642 /* It is a duplicate */
1643 SCTP_STAT_INCR(sctps_recvdupdata);
1644 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1645 /* Record a dup for the next outbound sack */
1646 asoc->dup_tsns[asoc->numduptsns] = tsn;
1649 asoc->send_sack = 1;
1652 /* Calculate the number of TSN's between the base and this TSN */
1653 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1654 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1655 /* Can't hold the bit in the mapping at max array, toss it */
1658 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1659 SCTP_TCB_LOCK_ASSERT(stcb);
1660 if (sctp_expand_mapping_array(asoc, gap)) {
1661 /* Can't expand, drop it */
1665 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1668 /* See if we have received this one already */
1669 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1670 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1671 SCTP_STAT_INCR(sctps_recvdupdata);
1672 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1673 /* Record a dup for the next outbound sack */
1674 asoc->dup_tsns[asoc->numduptsns] = tsn;
1677 asoc->send_sack = 1;
1681 * Check to see about the GONE flag, duplicates would cause a sack
1682 * to be sent up above
1684 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1685 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1686 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1688 * wait a minute, this guy is gone, there is no longer a
1689 * receiver. Send peer an ABORT!
1691 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1692 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1697 * Now before going further we see if there is room. If NOT then we
1698 * MAY let one through only IF this TSN is the one we are waiting
1699 * for on a partial delivery API.
1702 /* Is the stream valid? */
/* Invalid stream id: queue an ERROR cause, ack the TSN, and drop the data. */
1703 if (sid >= asoc->streamincnt) {
1704 struct sctp_error_invalid_stream *cause;
1706 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1707 0, M_NOWAIT, 1, MT_DATA);
1708 if (op_err != NULL) {
1709 /* add some space up front so prepend will work well */
1710 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1711 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1713 * Error causes are just param's and this one has
1714 * two back to back phdr, one with the error type
1715 * and size, the other with the streamid and a rsvd
1717 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1718 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1719 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1720 cause->stream_id = htons(sid);
1721 cause->reserved = htons(0);
1722 sctp_queue_op_err(stcb, op_err);
1724 SCTP_STAT_INCR(sctps_badsid);
1725 SCTP_TCB_LOCK_ASSERT(stcb);
/* Still mark the TSN received (nr-map) so it is SACKed, then bail. */
1726 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1727 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1728 asoc->highest_tsn_inside_nr_map = tsn;
1730 if (tsn == (asoc->cumulative_tsn + 1)) {
1731 /* Update cum-ack */
1732 asoc->cumulative_tsn = tsn;
1736 strm = &asoc->strmin[sid];
1738 * If its a fragmented message, lets see if we can find the control
1739 * on the reassembly queues.
1741 if ((chk_type == SCTP_IDATA) &&
1742 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1745 * The first *must* be fsn 0, and other (middle/end) pieces
1746 * can *not* be fsn 0. XXX: This can happen in case of a
1747 * wrap around. Ignore is for now.
1749 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1753 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
1754 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1755 chk_flags, control);
/* Fragmented chunk: sanity-check any existing reassembly it maps onto. */
1756 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1757 /* See if we can find the re-assembly entity */
1758 if (control != NULL) {
1759 /* We found something, does it belong? */
1760 if (ordered && (mid != control->mid)) {
1761 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1763 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1764 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1765 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1769 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1770 /* We can't have a switched order with an
1771 * unordered chunk */
1772 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1776 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1777 /* We can't have a switched unordered with a
1779 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1786 * Its a complete segment. Lets validate we don't have a
1787 * re-assembly going on with the same Stream/Seq (for
1788 * ordered) or in the same Stream for unordered.
1790 if (control != NULL) {
1791 if (ordered || asoc->idata_supported) {
1792 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1794 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1797 if ((tsn == control->fsn_included + 1) &&
1798 (control->end_added == 0)) {
1799 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1807 /* now do the tests */
/* Resource pressure: over the chunk-count limit or receive window is shut. */
1808 if (((asoc->cnt_on_all_streams +
1809 asoc->cnt_on_reasm_queue +
1810 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1811 (((int)asoc->my_rwnd) <= 0)) {
1813 * When we have NO room in the rwnd we check to make sure
1814 * the reader is doing its job...
1816 if (stcb->sctp_socket->so_rcv.sb_cc) {
1817 /* some to read, wake-up */
1818 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1821 so = SCTP_INP_SO(stcb->sctp_ep);
1822 atomic_add_int(&stcb->asoc.refcnt, 1);
1823 SCTP_TCB_UNLOCK(stcb);
1824 SCTP_SOCKET_LOCK(so, 1);
1825 SCTP_TCB_LOCK(stcb);
1826 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1827 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1828 /* assoc was freed while we were unlocked */
1829 SCTP_SOCKET_UNLOCK(so, 1);
1833 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1834 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1835 SCTP_SOCKET_UNLOCK(so, 1);
1838 /* now is it in the mapping array of what we have accepted? */
1839 if (chk_type == SCTP_DATA) {
1840 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1841 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1842 /* Nope not in the valid range dump it */
1844 sctp_set_rwnd(stcb, asoc);
1845 if ((asoc->cnt_on_all_streams +
1846 asoc->cnt_on_reasm_queue +
1847 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1848 SCTP_STAT_INCR(sctps_datadropchklmt);
1850 SCTP_STAT_INCR(sctps_datadroprwnd);
1856 if (control == NULL) {
1859 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
/* Optional per-association TSN trace ring (debug builds only). */
1864 #ifdef SCTP_ASOCLOG_OF_TSNS
1865 SCTP_TCB_LOCK_ASSERT(stcb);
1866 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1867 asoc->tsn_in_at = 0;
1868 asoc->tsn_in_wrapped = 1;
1870 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1871 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1872 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1873 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1874 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1875 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1876 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1877 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1881 * Before we continue lets validate that we are not being fooled by
1882 * an evil attacker. We can only have Nk chunks based on our TSN
1883 * spread allowed by the mapping array N * 8 bits, so there is no
1884 * way our stream sequence numbers could have wrapped. We of course
1885 * only validate the FIRST fragment so the bit must be set.
1887 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1888 (TAILQ_EMPTY(&asoc->resetHead)) &&
1889 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1890 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1891 /* The incoming sseq is behind where we last delivered? */
1892 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1893 mid, asoc->strmin[sid].last_mid_delivered);
1895 if (asoc->idata_supported) {
1896 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1897 asoc->strmin[sid].last_mid_delivered,
1902 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1903 (uint16_t) asoc->strmin[sid].last_mid_delivered,
1908 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1909 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1910 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1914 if (chk_type == SCTP_IDATA) {
1915 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1917 the_len = (chk_length - sizeof(struct sctp_data_chunk));
/*
 * Obtain the payload mbufs: copy if more chunks follow in this packet,
 * otherwise steal the packet's mbuf chain and trim header/trailer bytes.
 */
1919 if (last_chunk == 0) {
1920 if (chk_type == SCTP_IDATA) {
1921 dmbuf = SCTP_M_COPYM(*m,
1922 (offset + sizeof(struct sctp_idata_chunk)),
1925 dmbuf = SCTP_M_COPYM(*m,
1926 (offset + sizeof(struct sctp_data_chunk)),
1929 #ifdef SCTP_MBUF_LOGGING
1930 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1931 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1935 /* We can steal the last chunk */
1939 /* lop off the top part */
1940 if (chk_type == SCTP_IDATA) {
1941 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1943 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1945 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1946 l_len = SCTP_BUF_LEN(dmbuf);
1949 * need to count up the size hopefully does not hit
1955 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1956 l_len += SCTP_BUF_LEN(lat);
1959 if (l_len > the_len) {
1960 /* Trim the end round bytes off too */
1961 m_adj(dmbuf, -(l_len - the_len));
1964 if (dmbuf == NULL) {
1965 SCTP_STAT_INCR(sctps_nomem);
1969 * Now no matter what, we need a control, get one if we don't have
1970 * one (we may have gotten it above when we found the message was
1973 if (control == NULL) {
1974 sctp_alloc_a_readq(stcb, control);
1975 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1980 if (control == NULL) {
1981 SCTP_STAT_INCR(sctps_nomem);
/* Unfragmented message: the control is immediately complete. */
1984 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1985 control->data = dmbuf;
1986 control->tail_mbuf = NULL;
1987 control->end_added = 1;
1988 control->last_frag_seen = 1;
1989 control->first_frag_seen = 1;
1990 control->fsn_included = fsn;
1991 control->top_fsn = fsn;
1993 created_control = 1;
1995 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
1996 chk_flags, ordered, mid, control);
/* Express delivery: complete, no reset pending, nothing queued ahead. */
1997 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1998 TAILQ_EMPTY(&asoc->resetHead) &&
2000 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2001 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2002 /* Candidate for express delivery */
2004 * Its not fragmented, No PD-API is up, Nothing in the
2005 * delivery queue, Its un-ordered OR ordered and the next to
2006 * deliver AND nothing else is stuck on the stream queue,
2007 * And there is room for it in the socket buffer. Lets just
2008 * stuff it up the buffer....
2010 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2011 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2012 asoc->highest_tsn_inside_nr_map = tsn;
2014 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2017 sctp_add_to_readq(stcb->sctp_ep, stcb,
2018 control, &stcb->sctp_socket->so_rcv,
2019 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2021 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2022 /* for ordered, bump what we delivered */
2023 strm->last_mid_delivered++;
2025 SCTP_STAT_INCR(sctps_recvexpress);
2026 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2027 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2028 SCTP_STR_LOG_FROM_EXPRS_DEL);
2031 goto finish_express_del;
2033 /* Now will we need a chunk too? */
/* Fragmented: wrap the payload in a tmit_chunk for the reasm machinery. */
2034 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2035 sctp_alloc_a_chunk(stcb, chk);
2037 /* No memory so we drop the chunk */
2038 SCTP_STAT_INCR(sctps_nomem);
2039 if (last_chunk == 0) {
2040 /* we copied it, free the copy */
2041 sctp_m_freem(dmbuf);
2045 chk->rec.data.tsn = tsn;
2046 chk->no_fr_allowed = 0;
2047 chk->rec.data.fsn = fsn;
2048 chk->rec.data.mid = mid;
2049 chk->rec.data.sid = sid;
2050 chk->rec.data.ppid = ppid;
2051 chk->rec.data.context = stcb->asoc.context;
2052 chk->rec.data.doing_fast_retransmit = 0;
2053 chk->rec.data.rcv_flags = chk_flags;
2055 chk->send_size = the_len;
2057 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2060 atomic_add_int(&net->ref_count, 1);
2063 /* Set the appropriate TSN mark */
/* nr-map when draining is off (renegeing disabled), plain map otherwise. */
2064 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2065 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2066 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2067 asoc->highest_tsn_inside_nr_map = tsn;
2070 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2071 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2072 asoc->highest_tsn_inside_map = tsn;
2075 /* Now is it complete (i.e. not fragmented)? */
2076 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2078 * Special check for when streams are resetting. We could be
2079 * more smart about this and check the actual stream to see
2080 * if it is not being reset.. that way we would not create a
2081 * HOLB when amongst streams being reset and those not being
/* Past a pending stream reset point: park on pending_reply_queue (TSN order). */
2085 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2086 SCTP_TSN_GT(tsn, liste->tsn)) {
2088 * yep its past where we need to reset... go ahead
2091 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2093 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2095 struct sctp_queued_to_read *ctlOn, *nctlOn;
2096 unsigned char inserted = 0;
2098 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2099 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2104 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2109 if (inserted == 0) {
2111 * must be put at end, use prevP
2112 * (all setup from loop) to setup
2115 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2118 goto finish_express_del;
2120 if (chk_flags & SCTP_DATA_UNORDERED) {
2121 /* queue directly into socket buffer */
2122 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2124 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2125 sctp_add_to_readq(stcb->sctp_ep, stcb,
2127 &stcb->sctp_socket->so_rcv, 1,
2128 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2131 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2133 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2141 goto finish_express_del;
2143 /* If we reach here its a reassembly */
2144 need_reasm_check = 1;
2145 SCTPDBG(SCTP_DEBUG_XXX,
2146 "Queue data to stream for reasm control: %p MID: %u\n",
2148 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2151 * the assoc is now gone and chk was put onto the reasm
2152 * queue, which has all been freed.
2160 /* Here we tidy up things */
2161 if (tsn == (asoc->cumulative_tsn + 1)) {
2162 /* Update cum-ack */
2163 asoc->cumulative_tsn = tsn;
2169 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2171 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2173 SCTP_STAT_INCR(sctps_recvdata);
2174 /* Set it present please */
2175 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2176 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2178 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2179 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2180 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2182 /* check the special flag for stream resets */
/* Cum-ack reached a deferred stream reset: perform it and drain backlog. */
2183 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2184 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2186 * we have finished working through the backlogged TSN's now
2187 * time to reset streams. 1: call reset function. 2: free
2188 * pending_reply space 3: distribute any chunks in
2189 * pending_reply_queue.
2191 struct sctp_queued_to_read *ctl, *nctl;
2193 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2194 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2195 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2196 SCTP_FREE(liste, SCTP_M_STRESET);
2197 /* sa_ignore FREED_MEMORY */
2198 liste = TAILQ_FIRST(&asoc->resetHead);
2199 if (TAILQ_EMPTY(&asoc->resetHead)) {
2200 /* All can be removed */
2201 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2202 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2203 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2209 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2210 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2214 * if ctl->sinfo_tsn is <= liste->tsn we can
2215 * process it which is the NOT of
2216 * ctl->sinfo_tsn > liste->tsn
2218 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2219 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2226 * Now service re-assembly to pick up anything that has been
2227 * held on reassembly queue?
2229 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2230 need_reasm_check = 0;
2232 if (need_reasm_check) {
2233 /* Another one waits ? */
2234 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
/*
 * Lookup table indexed by one byte of the (nr_)mapping array.  The pattern
 * (tab[0x01]=1, tab[0x03]=2, tab[0x07]=3, ..., tab[0xff]=8) suggests the
 * value is the count of consecutive one-bits starting from the
 * least-significant bit, i.e. how many contiguous TSNs this byte acknowledges
 * before the first gap.  NOTE(review): inferred from the table contents and
 * its use in sctp_slide_mapping_arrays -- confirm against the full file.
 */
2239 static const int8_t sctp_map_lookup_tab[256] = {
2240 0, 1, 0, 2, 0, 1, 0, 3,
2241 0, 1, 0, 2, 0, 1, 0, 4,
2242 0, 1, 0, 2, 0, 1, 0, 3,
2243 0, 1, 0, 2, 0, 1, 0, 5,
2244 0, 1, 0, 2, 0, 1, 0, 3,
2245 0, 1, 0, 2, 0, 1, 0, 4,
2246 0, 1, 0, 2, 0, 1, 0, 3,
2247 0, 1, 0, 2, 0, 1, 0, 6,
2248 0, 1, 0, 2, 0, 1, 0, 3,
2249 0, 1, 0, 2, 0, 1, 0, 4,
2250 0, 1, 0, 2, 0, 1, 0, 3,
2251 0, 1, 0, 2, 0, 1, 0, 5,
2252 0, 1, 0, 2, 0, 1, 0, 3,
2253 0, 1, 0, 2, 0, 1, 0, 4,
2254 0, 1, 0, 2, 0, 1, 0, 3,
2255 0, 1, 0, 2, 0, 1, 0, 7,
2256 0, 1, 0, 2, 0, 1, 0, 3,
2257 0, 1, 0, 2, 0, 1, 0, 4,
2258 0, 1, 0, 2, 0, 1, 0, 3,
2259 0, 1, 0, 2, 0, 1, 0, 5,
2260 0, 1, 0, 2, 0, 1, 0, 3,
2261 0, 1, 0, 2, 0, 1, 0, 4,
2262 0, 1, 0, 2, 0, 1, 0, 3,
2263 0, 1, 0, 2, 0, 1, 0, 6,
2264 0, 1, 0, 2, 0, 1, 0, 3,
2265 0, 1, 0, 2, 0, 1, 0, 4,
2266 0, 1, 0, 2, 0, 1, 0, 3,
2267 0, 1, 0, 2, 0, 1, 0, 5,
2268 0, 1, 0, 2, 0, 1, 0, 3,
2269 0, 1, 0, 2, 0, 1, 0, 4,
2270 0, 1, 0, 2, 0, 1, 0, 3,
2271 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() -- advance the association's cumulative-TSN
 * point and, when whole leading bytes of the mapping bitmaps are fully
 * acked, slide both bitmaps down so mapping_array_base_tsn moves forward.
 * The cum-ack is computed over the OR of nr_mapping_array and
 * mapping_array (rationale in the original comment kept below).
 *
 * NOTE(review): this extract dropped interleaved lines of the original
 * file (blank lines, braces, the return type, the "val == 0xff" arm of
 * the scan loop, several declarations).  The code is left byte-identical;
 * only comments were added.  Claims below are hedged where the missing
 * lines matter.
 */
2276 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2279 * Now we also need to check the mapping array in a couple of ways.
2280 * 1) Did we move the cum-ack point?
2282 * When you first glance at this you might think that all entries
2283 * that make up the position of the cum-ack would be in the
2284 * nr-mapping array only.. i.e. things up to the cum-ack are always
2285 * deliverable. Thats true with one exception, when its a fragmented
2286 * message we may not deliver the data until some threshold (or all
2287 * of it) is in place. So we must OR the nr_mapping_array and
2288 * mapping_array to get a true picture of the cum-ack.
2290 struct sctp_association *asoc;
2293 int slide_from, slide_end, lgap, distance;
2294 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot pre-slide state for SCTP_MAP_PREPARE_SLIDE logging below. */
2298 old_cumack = asoc->cumulative_tsn;
2299 old_base = asoc->mapping_array_base_tsn;
2300 old_highest = asoc->highest_tsn_inside_map;
2302 * We could probably improve this a small bit by calculating the
2303 * offset of the current cum-ack as the starting point.
/*
 * Scan the OR of the two bitmaps byte-by-byte; "at" accumulates the
 * count of contiguous received TSNs from the base.  (The "val == 0xff"
 * full-byte case and the loop break were lost in extraction.)
 */
2306 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2307 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2311 /* there is a 0 bit */
2312 at += sctp_map_lookup_tab[val];
/* New cum-ack = base TSN plus the contiguous run length, minus one. */
2316 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: the cum-ack must not exceed both highest-TSN trackers.
 * The panic() appears to be the debug-build path (presumably under
 * #ifdef INVARIANTS in the full file -- TODO confirm); the non-panic
 * path logs, dumps the map, and repairs the trackers.
 */
2318 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2319 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2321 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2322 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2324 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2325 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2326 sctp_print_mapping_array(asoc);
2327 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2328 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2330 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2331 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the renege-able and non-renege-able trackers. */
2334 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2335 highest_tsn = asoc->highest_tsn_inside_nr_map;
2337 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything up to the highest TSN is acked -- wipe the used
 * prefix of both bitmaps and restart the window at cum-ack + 1.
 */
2339 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2340 /* The complete array was completed by a single FR */
2341 /* highest becomes the cum-ack */
2347 /* clear the array */
2348 clr = ((at + 7) >> 3);
2349 if (clr > asoc->mapping_array_size) {
2350 clr = asoc->mapping_array_size;
2352 memset(asoc->mapping_array, 0, clr);
2353 memset(asoc->nr_mapping_array, 0, clr);
/* Debug sweep: verify nothing survived the clear (log + dump if so). */
2355 for (i = 0; i < asoc->mapping_array_size; i++) {
2356 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2357 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2358 sctp_print_mapping_array(asoc);
2362 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2363 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Case 2: at least one full byte (8 TSNs) is acked -- slide both
 * arrays down by slide_from whole bytes.
 */
2364 } else if (at >= 8) {
2365 /* we can slide the mapping array down */
2366 /* slide_from holds where we hit the first NON 0xff byte */
2369 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn from the base; slide_end = its byte. */
2372 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2373 slide_end = (lgap >> 3);
2374 if (slide_end < slide_from) {
2375 sctp_print_mapping_array(asoc);
2377 panic("impossible slide");
2379 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2380 lgap, slide_end, slide_from, at);
2384 if (slide_end > asoc->mapping_array_size) {
2386 panic("would overrun buffer");
2388 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2389 asoc->mapping_array_size, slide_end);
2390 slide_end = asoc->mapping_array_size;
/* Number of bytes of live bitmap data to preserve by copying down. */
2393 distance = (slide_end - slide_from) + 1;
2394 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2395 sctp_log_map(old_base, old_cumack, old_highest,
2396 SCTP_MAP_PREPARE_SLIDE);
2397 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2398 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/*
 * Refuse to slide if the copy would run past the array.  NOTE(review):
 * the second operand of this "||" was lost in extraction -- confirm
 * against the original file.
 */
2400 if (distance + slide_from > asoc->mapping_array_size ||
2403 * Here we do NOT slide forward the array so that
2404 * hopefully when more data comes in to fill it up
2405 * we will be able to slide it forward. Really I
2406 * don't think this should happen :-0
2409 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2410 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2411 (uint32_t) asoc->mapping_array_size,
2412 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes to the front of both arrays ... */
2417 for (ii = 0; ii < distance; ii++) {
2418 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2419 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ... and zero the vacated tail. */
2422 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2423 asoc->mapping_array[ii] = 0;
2424 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "empty map" invariant (highest == base - 1) intact for each
 * tracker after the base moves forward by 8 TSNs per slid byte.
 */
2426 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2427 asoc->highest_tsn_inside_map += (slide_from << 3);
2429 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2430 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2432 asoc->mapping_array_base_tsn += (slide_from << 3);
2433 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2434 sctp_log_map(asoc->mapping_array_base_tsn,
2435 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2436 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() -- decide whether to emit a SACK immediately, arm the
 * delayed-ack timer, or (in SHUTDOWN-SENT state) send a SHUTDOWN plus a
 * SACK.  Called after data processing; "was_a_gap" tells us whether a gap
 * existed before this packet was processed.
 *
 * NOTE(review): interleaved lines of the original (braces, "else", the
 * "is_a_gap" declaration) were dropped in extraction; code left
 * byte-identical, comments only added.
 */
2443 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2445 struct sctp_association *asoc;
2446 uint32_t highest_tsn;
/* First bring the cum-ack point up to date. */
2449 sctp_slide_mapping_arrays(stcb);
2451 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2452 highest_tsn = asoc->highest_tsn_inside_nr_map;
2454 highest_tsn = asoc->highest_tsn_inside_map;
2456 /* Is there a gap now? */
2457 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2460 * Now we need to see if we need to queue a sack or just start the
2461 * timer (if allowed).
/*
 * SHUTDOWN-SENT special case: cancel any pending delayed-ack timer and
 * answer with SHUTDOWN + immediate SACK instead.
 */
2463 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2465 * Ok special case, in SHUTDOWN-SENT case. here we maker
2466 * sure SACK timer is off and instead send a SHUTDOWN and a
2469 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2470 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2471 stcb->sctp_ep, stcb, NULL,
2472 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* Prefer the alternate net for the SHUTDOWN when one is set. */
2474 sctp_send_shutdown(stcb,
2475 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2477 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2481 * CMT DAC algorithm: increase number of packets received
2484 stcb->asoc.cmt_dac_pkts_rcvd++;
/*
 * A SACK is due now if any of: explicitly requested, a gap just
 * closed, duplicates seen, a gap still open, delayed ack disabled, or
 * the packet-count threshold (sack_freq) was reached.
 */
2486 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2488 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2490 (stcb->asoc.numduptsns) || /* we have dup's */
2491 (is_a_gap) || /* is still a gap */
2492 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2493 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ ) {
/*
 * CMT DAC exception: when DAC is on and nothing forces an immediate
 * SACK, delay the ack (gap reports caused by CMT reordering get the
 * delayed-ack treatment rather than an instant SACK).
 */
2495 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2496 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2497 (stcb->asoc.send_sack == 0) &&
2498 (stcb->asoc.numduptsns == 0) &&
2499 (stcb->asoc.delayed_ack) &&
2500 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2503 * CMT DAC algorithm: With CMT, delay acks
2504 * even in the face of
2506 * reordering. Therefore, if acks that do
2507 * not have to be sent because of the above
2508 * reasons, will be delayed. That is, acks
2509 * that would have been sent due to gap
2510 * reports will be delayed with DAC. Start
2511 * the delayed ack timer.
2513 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2514 stcb->sctp_ep, stcb, NULL);
2517 * Ok we must build a SACK since the timer
2518 * is pending, we got our first packet OR
2519 * there are gaps or duplicates.
2521 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2522 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise just make sure the delayed-ack timer is running. */
2525 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2526 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2527 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_process_data() -- walk every chunk in the data region of a received
 * SCTP packet.  DATA/I-DATA chunks are handed to sctp_process_a_data_chunk();
 * a DATA chunk when I-DATA was negotiated (or vice versa) aborts the
 * association; known control chunk types appearing here are ignored; unknown
 * types are handled per the two high bits of the chunk type (0x40: queue an
 * "unrecognized chunk" error report; 0x80 clear: stop processing the packet).
 * Afterwards it updates liveness/auto-close state and kicks the SACK
 * machinery via sctp_sack_check().
 *
 * NOTE(review): extraction dropped interleaved lines (braces, "else",
 * declarations such as "m", "stop_proc", "clen", several break/continue
 * statements).  Code left byte-identical; only comments added.
 */
2534 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2535 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2536 struct sctp_nets *net, uint32_t * high_tsn)
2538 struct sctp_chunkhdr *ch, chunk_buf;
2539 struct sctp_association *asoc;
2540 int num_chunks = 0; /* number of control chunks processed */
2542 int chk_length, break_flag, last_chunk;
2543 int abort_flag = 0, was_a_gap;
2545 uint32_t highest_tsn;
/* Refresh our advertised receive window before processing data. */
2548 sctp_set_rwnd(stcb, &stcb->asoc);
2551 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap already existed before this packet (for SACK). */
2553 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2554 highest_tsn = asoc->highest_tsn_inside_nr_map;
2556 highest_tsn = asoc->highest_tsn_inside_map;
2558 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2560 * setup where we got the last DATA packet from for any SACK that
2561 * may need to go out. Don't bump the net. This is done ONLY when a
2562 * chunk is assigned.
2564 asoc->last_data_chunk_from = net;
2567 * Now before we proceed we must figure out if this is a wasted
2568 * cluster... i.e. it is a small packet sent in and yet the driver
2569 * underneath allocated a full cluster for it. If so we must copy it
2570 * to a smaller mbuf and free up the cluster mbuf. This will help
2571 * with cluster starvation. Note for __Panda__ we don't do this
2572 * since it has clusters all the way down to 64 bytes.
2574 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2575 /* we only handle mbufs that are singletons.. not chains */
2576 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2578 /* ok lets see if we can copy the data up */
2581 /* get the pointers and copy */
2582 to = mtod(m, caddr_t *);
2583 from = mtod((*mm), caddr_t *);
2584 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2585 /* copy the length and free up the old */
2586 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2588 /* success, back copy */
2591 /* We are in trouble in the mbuf world .. yikes */
2595 /* get pointer to the first chunk header */
2596 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2597 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2602 * process all DATA chunks...
2604 *high_tsn = asoc->cumulative_tsn;
2606 asoc->data_pkts_seen++;
/* Main chunk-walking loop over the remainder of the packet. */
2607 while (stop_proc == 0) {
2608 /* validate chunk length */
2609 chk_length = ntohs(ch->chunk_length);
2610 if (length - *offset < chk_length) {
2611 /* all done, mutulated chunk */
/*
 * Protocol violations: chunk kind must match what was negotiated
 * (RFC 8260 I-DATA vs. plain DATA) -- abort the association otherwise.
 * NOTE(review): the first message text looks swapped relative to its
 * condition (idata_supported && SCTP_DATA) -- confirm against the
 * upstream file before relying on the wording.
 */
2615 if ((asoc->idata_supported == 1) &&
2616 (ch->chunk_type == SCTP_DATA)) {
2617 struct mbuf *op_err;
2618 char msg[SCTP_DIAG_INFO_LEN];
2620 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2621 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2622 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2623 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2626 if ((asoc->idata_supported == 0) &&
2627 (ch->chunk_type == SCTP_IDATA)) {
2628 struct mbuf *op_err;
2629 char msg[SCTP_DIAG_INFO_LEN];
2631 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2632 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2633 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2634 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* A (I-)DATA chunk: sanity-check its length, then process it. */
2637 if ((ch->chunk_type == SCTP_DATA) ||
2638 (ch->chunk_type == SCTP_IDATA)) {
2641 if (ch->chunk_type == SCTP_DATA) {
2642 clen = sizeof(struct sctp_data_chunk);
2644 clen = sizeof(struct sctp_idata_chunk);
2646 if (chk_length < clen) {
2648 * Need to send an abort since we had a
2649 * invalid data chunk.
2651 struct mbuf *op_err;
2652 char msg[SCTP_DIAG_INFO_LEN];
2654 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2656 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2657 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2658 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2661 #ifdef SCTP_AUDITING_ENABLED
2662 sctp_audit_log(0xB1, 0);
/* last_chunk: does this chunk's padded length reach end of packet? */
2664 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2669 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2670 chk_length, net, high_tsn, &abort_flag, &break_flag,
2671 last_chunk, ch->chunk_type)) {
2679 * Set because of out of rwnd space and no
2680 * drop rep space left.
2686 /* not a data chunk in the data region */
2687 switch (ch->chunk_type) {
2688 case SCTP_INITIATION:
2689 case SCTP_INITIATION_ACK:
2690 case SCTP_SELECTIVE_ACK:
2691 case SCTP_NR_SELECTIVE_ACK:
2692 case SCTP_HEARTBEAT_REQUEST:
2693 case SCTP_HEARTBEAT_ACK:
2694 case SCTP_ABORT_ASSOCIATION:
2696 case SCTP_SHUTDOWN_ACK:
2697 case SCTP_OPERATION_ERROR:
2698 case SCTP_COOKIE_ECHO:
2699 case SCTP_COOKIE_ACK:
2702 case SCTP_SHUTDOWN_COMPLETE:
2703 case SCTP_AUTHENTICATION:
2704 case SCTP_ASCONF_ACK:
2705 case SCTP_PACKET_DROPPED:
2706 case SCTP_STREAM_RESET:
2707 case SCTP_FORWARD_CUM_TSN:
2711 * Now, what do we do with KNOWN
2712 * chunks that are NOT in the right
2715 * For now, I do nothing but ignore
2716 * them. We may later want to add
2717 * sysctl stuff to switch out and do
2718 * either an ABORT() or possibly
2721 struct mbuf *op_err;
2722 char msg[SCTP_DIAG_INFO_LEN];
2724 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2726 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2727 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2731 /* unknown chunk type, use bit rules */
2732 if (ch->chunk_type & 0x40) {
2733 /* Add a error report to the queue */
2734 struct mbuf *op_err;
2735 struct sctp_gen_error_cause *cause;
2737 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2738 0, M_NOWAIT, 1, MT_DATA);
2739 if (op_err != NULL) {
2740 cause = mtod(op_err, struct sctp_gen_error_cause *);
2741 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2742 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2743 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
/* Attach a copy of the offending chunk to the error report. */
2744 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2745 if (SCTP_BUF_NEXT(op_err) != NULL) {
2746 sctp_queue_op_err(stcb, op_err);
2748 sctp_m_freem(op_err);
/* High bit clear -> stop processing the rest of this packet. */
2752 if ((ch->chunk_type & 0x80) == 0) {
2753 /* discard the rest of this packet */
2755 } /* else skip this bad chunk and
2756 * continue... */ break;
2757 } /* switch of chunk type */
/* Advance past this chunk (lengths are padded to 4-byte multiples). */
2759 *offset += SCTP_SIZE32(chk_length);
2760 if ((*offset >= length) || stop_proc) {
2761 /* no more data left in the mbuf chain */
2765 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2766 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2775 * we need to report rwnd overrun drops.
2777 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2781 * Did we get data, if so update the time for auto-close and
2782 * give peer credit for being alive.
2784 SCTP_STAT_INCR(sctps_recvpktwithdata);
2785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2786 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2787 stcb->asoc.overall_error_count,
2789 SCTP_FROM_SCTP_INDATA,
/* Valid data from the peer resets the association error counter. */
2792 stcb->asoc.overall_error_count = 0;
2793 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2795 /* now service all of the reassm queue if needed */
2796 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2797 /* Assure that we ack right away */
2798 stcb->asoc.send_sack = 1;
2800 /* Start a sack timer or QUEUE a SACK for sending */
2801 sctp_sack_check(stcb, was_a_gap);
/*
 * sctp_process_segment_range() -- mark as acked every sent chunk whose TSN
 * falls inside one SACK gap-ack block [last_tsn + frag_strt, last_tsn +
 * frag_end].  Maintains CMT CUCv2 pseudo-cumack trackers, the SFR/HTNA
 * "saw_newack"/highest-newack state, flight-size accounting, RTO sampling,
 * and (for nr-sacks) frees chunk data, returning non-zero ("wake_him")
 * when a sleeping sender should be woken.  *p_tp1 carries the sent-queue
 * cursor between consecutive gap blocks.
 *
 * NOTE(review): extraction dropped interleaved lines (braces, "else",
 * several conditions such as the nr_sacking test before the NR-ack
 * section, the RTT-sampling guard, and loop-control statements).  Code
 * left byte-identical; comments only added.
 */
2806 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2807 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2809 uint32_t * biggest_newly_acked_tsn,
2810 uint32_t * this_sack_lowest_newack,
2813 struct sctp_tmit_chunk *tp1;
2814 unsigned int theTSN;
2815 int j, wake_him = 0, circled = 0;
2817 /* Recover the tp1 we last saw */
2820 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* One iteration per TSN inside the gap block (offsets from last_tsn). */
2822 for (j = frag_strt; j <= frag_end; j++) {
2823 theTSN = j + last_tsn;
2825 if (tp1->rec.data.doing_fast_retransmit)
2829 * CMT: CUCv2 algorithm. For each TSN being
2830 * processed from the sent queue, track the
2831 * next expected pseudo-cumack, or
2832 * rtx_pseudo_cumack, if required. Separate
2833 * cumack trackers for first transmissions,
2834 * and retransmissions.
/* First transmission (snd_count == 1): latch the pseudo-cumack. */
2836 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2837 (tp1->whoTo->find_pseudo_cumack == 1) &&
2838 (tp1->snd_count == 1)) {
2839 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2840 tp1->whoTo->find_pseudo_cumack = 0;
/* Retransmission (snd_count > 1): latch the rtx pseudo-cumack. */
2842 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2843 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2844 (tp1->snd_count > 1)) {
2845 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2846 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk carrying this gap-acked TSN. */
2848 if (tp1->rec.data.tsn == theTSN) {
2849 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2851 * must be held until
2854 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2856 * If it is less than RESEND, it is
2857 * now no-longer in flight.
2858 * Higher values may already be set
2859 * via previous Gap Ack Blocks...
2860 * i.e. ACKED or RESEND.
2862 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2863 *biggest_newly_acked_tsn)) {
2864 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
2867 * CMT: SFR algo (and HTNA) - set
2868 * saw_newack to 1 for dest being
2869 * newly acked. update
2870 * this_sack_highest_newack if
2873 if (tp1->rec.data.chunk_was_revoked == 0)
2874 tp1->whoTo->saw_newack = 1;
2876 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2877 tp1->whoTo->this_sack_highest_newack)) {
2878 tp1->whoTo->this_sack_highest_newack =
2882 * CMT DAC algo: also update
2883 * this_sack_lowest_newack
2885 if (*this_sack_lowest_newack == 0) {
2886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2887 sctp_log_sack(*this_sack_lowest_newack,
2892 SCTP_LOG_TSN_ACKED);
2894 *this_sack_lowest_newack = tp1->rec.data.tsn;
2897 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2898 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2899 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2900 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2901 * Separate pseudo_cumack trackers for first transmissions and
2904 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
2905 if (tp1->rec.data.chunk_was_revoked == 0) {
2906 tp1->whoTo->new_pseudo_cumack = 1;
2908 tp1->whoTo->find_pseudo_cumack = 1;
2910 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2911 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
2913 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
2914 if (tp1->rec.data.chunk_was_revoked == 0) {
2915 tp1->whoTo->new_pseudo_cumack = 1;
2917 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2920 sctp_log_sack(*biggest_newly_acked_tsn,
2925 SCTP_LOG_TSN_ACKED);
2927 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2928 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2929 tp1->whoTo->flight_size,
2931 (uint32_t) (uintptr_t) tp1->whoTo,
/* Newly acked by a gap report: remove from flight accounting. */
2934 sctp_flight_size_decrease(tp1);
2935 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2936 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2939 sctp_total_flight_decrease(stcb, tp1);
/* net_ack feeds the CC module; net_ack2 only for first transmissions. */
2941 tp1->whoTo->net_ack += tp1->send_size;
2942 if (tp1->snd_count < 2) {
2944 * True non-retransmited chunk
2946 tp1->whoTo->net_ack2 += tp1->send_size;
/*
 * RTT sample from this chunk's send time.  NOTE(review): the guard
 * lines (first-transmission / rto_needed checks) were lost in
 * extraction -- confirm sampling conditions against the original.
 */
2954 sctp_calculate_rto(stcb,
2957 &tp1->sent_rcv_time,
2958 sctp_align_safe_nocopy,
2959 SCTP_RTT_FROM_DATA);
2962 if (tp1->whoTo->rto_needed == 0) {
2963 tp1->whoTo->rto_needed = 1;
/* Track the highest gap-acked TSN for later strike/revoke passes. */
2969 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2970 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2971 stcb->asoc.this_sack_highest_gap)) {
2972 stcb->asoc.this_sack_highest_gap =
/* Chunk was queued for retransmit but got acked: fix the retran count. */
2975 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2976 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2977 #ifdef SCTP_AUDITING_ENABLED
2978 sctp_audit_log(0xB2,
2979 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2984 * All chunks NOT UNSENT fall through here and are marked
2985 * (leave PR-SCTP ones that are to skip alone though)
2987 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2988 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2989 tp1->sent = SCTP_DATAGRAM_MARKED;
2991 if (tp1->rec.data.chunk_was_revoked) {
2992 /* deflate the cwnd */
2993 tp1->whoTo->cwnd -= tp1->book_size;
2994 tp1->rec.data.chunk_was_revoked = 0;
2996 /* NR Sack code here */
/* NOTE(review): the "if (nr_sacking &&" line was lost in extraction. */
2998 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2999 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3000 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3003 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* Stream drained while a reset was pending: trigger the reset. */
3006 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3007 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3008 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3009 stcb->asoc.trigger_reset = 1;
3011 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* nr-acked chunks will never be retransmitted: release their data. */
3015 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3016 sctp_m_freem(tp1->data);
3023 } /* if (tp1->tsn == theTSN) */ if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
/* Walk the sent queue; wrap around once if TSNs were out of order. */
3026 tp1 = TAILQ_NEXT(tp1, sctp_next);
3027 if ((tp1 == NULL) && (circled == 0)) {
3029 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3031 } /* end while (tp1) */
3034 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3036 /* In case the fragments were not in order we must reset */
3037 } /* end for (j = fragStart */
3039 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() -- iterate over the (num_seg + num_nr_seg)
 * gap-ack blocks of a received SACK/NR-SACK chunk, validate each block,
 * and hand it to sctp_process_segment_range().  Returns non-zero when a
 * chunk's data was freed (nr-sack), so the caller can wake the sender.
 *
 * NOTE(review): extraction dropped interleaved lines (braces,
 * declarations for "i", "num_frs", "chunk_freed", "non_revocable", the
 * mbuf-read failure branch, and the nr-seg boundary reset).  Code left
 * byte-identical; comments only added.
 */
3044 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3045 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3046 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3047 int num_seg, int num_nr_seg, int *rto_ok)
3049 struct sctp_gap_ack_block *frag, block;
3050 struct sctp_tmit_chunk *tp1;
3055 uint16_t frag_strt, frag_end, prev_frag_end;
3057 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3061 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3064 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next 4-byte gap-ack block out of the SACK chunk. */
3066 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3067 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3068 *offset += sizeof(block);
3070 return (chunk_freed);
/* Block offsets are relative to the SACK's cumulative TSN (last_tsn). */
3072 frag_strt = ntohs(frag->start);
3073 frag_end = ntohs(frag->end);
3075 if (frag_strt > frag_end) {
3076 /* This gap report is malformed, skip it. */
3079 if (frag_strt <= prev_frag_end) {
3080 /* This gap report is not in order, so restart. */
3081 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3083 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3084 *biggest_tsn_acked = last_tsn + frag_end;
/* Delegate the per-TSN marking; non-zero means data was freed. */
3091 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3092 non_revocable, &num_frs, biggest_newly_acked_tsn,
3093 this_sack_lowest_newack, rto_ok)) {
3096 prev_frag_end = frag_end;
3098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3100 sctp_log_fr(*biggest_tsn_acked,
3101 *biggest_newly_acked_tsn,
3102 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3104 return (chunk_freed);
/*
 * sctp_check_for_revoked() -- after a SACK has been fully processed, scan
 * the sent queue for chunks above the cumulative ack that were previously
 * gap-acked (SCTP_DATAGRAM_ACKED) but are NOT covered by this SACK: those
 * acks were "revoked" by the peer, so the chunks go back to SENT and their
 * flight-size/cwnd accounting is restored.  Chunks that this SACK did
 * re-cover (SCTP_DATAGRAM_MARKED) are promoted to ACKED.
 *
 * NOTE(review): extraction dropped interleaved lines (braces and the
 * final loop-exit statements after the UNSENT check).  Code left
 * byte-identical; comments only added.
 */
3108 sctp_check_for_revoked(struct sctp_tcb *stcb,
3109 struct sctp_association *asoc, uint32_t cumack,
3110 uint32_t biggest_tsn_acked)
3112 struct sctp_tmit_chunk *tp1;
3114 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3115 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3117 * ok this guy is either ACK or MARKED. If it is
3118 * ACKED it has been previously acked but not this
3119 * time i.e. revoked. If it is MARKED it was ACK'ed
/* TSNs beyond the highest block in this SACK carry no information. */
3122 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3125 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3126 /* it has been revoked */
3127 tp1->sent = SCTP_DATAGRAM_SENT;
3128 tp1->rec.data.chunk_was_revoked = 1;
3130 * We must add this stuff back in to assure
3131 * timers and such get started.
3133 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3134 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3135 tp1->whoTo->flight_size,
3137 (uint32_t) (uintptr_t) tp1->whoTo,
/* The chunk counts as in flight again. */
3140 sctp_flight_size_increase(tp1);
3141 sctp_total_flight_increase(stcb, tp1);
3143 * We inflate the cwnd to compensate for our
3144 * artificial inflation of the flight_size.
3146 tp1->whoTo->cwnd += tp1->book_size;
3147 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3148 sctp_log_sack(asoc->last_acked_seq,
3153 SCTP_LOG_TSN_REVOKED);
3155 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3156 /* it has been re-acked in this SACK */
3157 tp1->sent = SCTP_DATAGRAM_ACKED;
/* Sent queue is TSN-ordered; UNSENT means nothing further to examine. */
3160 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3167 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3168 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3170 struct sctp_tmit_chunk *tp1;
3171 int strike_flag = 0;
3173 int tot_retrans = 0;
3174 uint32_t sending_seq;
3175 struct sctp_nets *net;
3176 int num_dests_sacked = 0;
3179 * select the sending_seq, this is either the next thing ready to be
3180 * sent but not transmitted, OR, the next seq we assign.
3182 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3184 sending_seq = asoc->sending_seq;
3186 sending_seq = tp1->rec.data.tsn;
3189 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3190 if ((asoc->sctp_cmt_on_off > 0) &&
3191 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3192 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3193 if (net->saw_newack)
3197 if (stcb->asoc.prsctp_supported) {
3198 (void)SCTP_GETTIME_TIMEVAL(&now);
3200 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3202 if (tp1->no_fr_allowed) {
3203 /* this one had a timeout or something */
3206 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3207 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3208 sctp_log_fr(biggest_tsn_newly_acked,
3211 SCTP_FR_LOG_CHECK_STRIKE);
3213 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3214 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3218 if (stcb->asoc.prsctp_supported) {
3219 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3220 /* Is it expired? */
3221 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3222 /* Yes so drop it */
3223 if (tp1->data != NULL) {
3224 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3225 SCTP_SO_NOT_LOCKED);
3231 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3232 /* we are beyond the tsn in the sack */
3235 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3236 /* either a RESEND, ACKED, or MARKED */
3238 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3239 /* Continue strikin FWD-TSN chunks */
3240 tp1->rec.data.fwd_tsn_cnt++;
3245 * CMT : SFR algo (covers part of DAC and HTNA as well)
3247 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3249 * No new acks were receieved for data sent to this
3250 * dest. Therefore, according to the SFR algo for
3251 * CMT, no data sent to this dest can be marked for
3252 * FR using this SACK.
3255 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3256 tp1->whoTo->this_sack_highest_newack)) {
3258 * CMT: New acks were receieved for data sent to
3259 * this dest. But no new acks were seen for data
3260 * sent after tp1. Therefore, according to the SFR
3261 * algo for CMT, tp1 cannot be marked for FR using
3262 * this SACK. This step covers part of the DAC algo
3263 * and the HTNA algo as well.
3268 * Here we check to see if we were have already done a FR
3269 * and if so we see if the biggest TSN we saw in the sack is
3270 * smaller than the recovery point. If so we don't strike
3271 * the tsn... otherwise we CAN strike the TSN.
3274 * @@@ JRI: Check for CMT if (accum_moved &&
3275 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3278 if (accum_moved && asoc->fast_retran_loss_recovery) {
3280 * Strike the TSN if in fast-recovery and cum-ack
3283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3284 sctp_log_fr(biggest_tsn_newly_acked,
3287 SCTP_FR_LOG_STRIKE_CHUNK);
3289 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3292 if ((asoc->sctp_cmt_on_off > 0) &&
3293 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3295 * CMT DAC algorithm: If SACK flag is set to
3296 * 0, then lowest_newack test will not pass
3297 * because it would have been set to the
3298 * cumack earlier. If not already to be
3299 * rtx'd, If not a mixed sack and if tp1 is
3300 * not between two sacked TSNs, then mark by
3301 * one more. NOTE that we are marking by one
3302 * additional time since the SACK DAC flag
3303 * indicates that two packets have been
3304 * received after this missing TSN.
3306 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3307 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3308 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3309 sctp_log_fr(16 + num_dests_sacked,
3312 SCTP_FR_LOG_STRIKE_CHUNK);
3317 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3318 (asoc->sctp_cmt_on_off == 0)) {
3320 * For those that have done a FR we must take
3321 * special consideration if we strike. I.e the
3322 * biggest_newly_acked must be higher than the
3323 * sending_seq at the time we did the FR.
3326 #ifdef SCTP_FR_TO_ALTERNATE
3328 * If FR's go to new networks, then we must only do
3329 * this for singly homed asoc's. However if the FR's
3330 * go to the same network (Armando's work) then its
3331 * ok to FR multiple times.
3339 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3340 tp1->rec.data.fast_retran_tsn)) {
3342 * Strike the TSN, since this ack is
3343 * beyond where things were when we
3346 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3347 sctp_log_fr(biggest_tsn_newly_acked,
3350 SCTP_FR_LOG_STRIKE_CHUNK);
3352 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3356 if ((asoc->sctp_cmt_on_off > 0) &&
3357 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3359 * CMT DAC algorithm: If
3360 * SACK flag is set to 0,
3361 * then lowest_newack test
3362 * will not pass because it
3363 * would have been set to
3364 * the cumack earlier. If
3365 * not already to be rtx'd,
3366 * If not a mixed sack and
3367 * if tp1 is not between two
3368 * sacked TSNs, then mark by
3369 * one more. NOTE that we
3370 * are marking by one
3371 * additional time since the
3372 * SACK DAC flag indicates
3373 * that two packets have
3374 * been received after this
3377 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3378 (num_dests_sacked == 1) &&
3379 SCTP_TSN_GT(this_sack_lowest_newack,
3380 tp1->rec.data.tsn)) {
3381 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3382 sctp_log_fr(32 + num_dests_sacked,
3385 SCTP_FR_LOG_STRIKE_CHUNK);
3387 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3395 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3398 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3399 biggest_tsn_newly_acked)) {
3401 * We don't strike these: This is the HTNA
3402 * algorithm i.e. we don't strike If our TSN is
3403 * larger than the Highest TSN Newly Acked.
3407 /* Strike the TSN */
3408 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3409 sctp_log_fr(biggest_tsn_newly_acked,
3412 SCTP_FR_LOG_STRIKE_CHUNK);
3414 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3417 if ((asoc->sctp_cmt_on_off > 0) &&
3418 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3420 * CMT DAC algorithm: If SACK flag is set to
3421 * 0, then lowest_newack test will not pass
3422 * because it would have been set to the
3423 * cumack earlier. If not already to be
3424 * rtx'd, If not a mixed sack and if tp1 is
3425 * not between two sacked TSNs, then mark by
3426 * one more. NOTE that we are marking by one
3427 * additional time since the SACK DAC flag
3428 * indicates that two packets have been
3429 * received after this missing TSN.
3431 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3432 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3433 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3434 sctp_log_fr(48 + num_dests_sacked,
3437 SCTP_FR_LOG_STRIKE_CHUNK);
3443 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3444 struct sctp_nets *alt;
3446 /* fix counts and things */
3447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3448 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3449 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3451 (uint32_t) (uintptr_t) tp1->whoTo,
3455 tp1->whoTo->net_ack++;
3456 sctp_flight_size_decrease(tp1);
3457 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3458 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3462 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3463 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3464 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3466 /* add back to the rwnd */
3467 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3469 /* remove from the total flight */
3470 sctp_total_flight_decrease(stcb, tp1);
3472 if ((stcb->asoc.prsctp_supported) &&
3473 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3474 /* Has it been retransmitted tv_sec times? -
3475 * we store the retran count there. */
3476 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3477 /* Yes, so drop it */
3478 if (tp1->data != NULL) {
3479 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3480 SCTP_SO_NOT_LOCKED);
3482 /* Make sure to flag we had a FR */
3483 tp1->whoTo->net_ack++;
3487 /* SCTP_PRINTF("OK, we are now ready to FR this
3489 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3490 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3494 /* This is a subsequent FR */
3495 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3497 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3498 if (asoc->sctp_cmt_on_off > 0) {
3500 * CMT: Using RTX_SSTHRESH policy for CMT.
3501 * If CMT is being used, then pick dest with
3502 * largest ssthresh for any retransmission.
3504 tp1->no_fr_allowed = 1;
3506 /* sa_ignore NO_NULL_CHK */
3507 if (asoc->sctp_cmt_pf > 0) {
3508 /* JRS 5/18/07 - If CMT PF is on,
3509 * use the PF version of
3511 alt = sctp_find_alternate_net(stcb, alt, 2);
3513 /* JRS 5/18/07 - If only CMT is on,
3514 * use the CMT version of
3516 /* sa_ignore NO_NULL_CHK */
3517 alt = sctp_find_alternate_net(stcb, alt, 1);
3523 * CUCv2: If a different dest is picked for
3524 * the retransmission, then new
3525 * (rtx-)pseudo_cumack needs to be tracked
3526 * for orig dest. Let CUCv2 track new (rtx-)
3527 * pseudo-cumack always.
3530 tp1->whoTo->find_pseudo_cumack = 1;
3531 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3533 } else {/* CMT is OFF */
3535 #ifdef SCTP_FR_TO_ALTERNATE
3536 /* Can we find an alternate? */
3537 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3540 * default behavior is to NOT retransmit
3541 * FR's to an alternate. Armando Caro's
3542 * paper details why.
3548 tp1->rec.data.doing_fast_retransmit = 1;
3550 /* mark the sending seq for possible subsequent FR's */
3552 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3553 * (uint32_t)tpi->rec.data.tsn);
3555 if (TAILQ_EMPTY(&asoc->send_queue)) {
3557 * If the queue of send is empty then its
3558 * the next sequence number that will be
3559 * assigned so we subtract one from this to
3560 * get the one we last sent.
3562 tp1->rec.data.fast_retran_tsn = sending_seq;
3565 * If there are chunks on the send queue
3566 * (unsent data that has made it from the
3567 * stream queues but not out the door, we
3568 * take the first one (which will have the
3569 * lowest TSN) and subtract one to get the
3572 struct sctp_tmit_chunk *ttt;
3574 ttt = TAILQ_FIRST(&asoc->send_queue);
3575 tp1->rec.data.fast_retran_tsn =
3581 * this guy had a RTO calculation pending on
3584 if ((tp1->whoTo != NULL) &&
3585 (tp1->whoTo->rto_needed == 0)) {
3586 tp1->whoTo->rto_needed = 1;
3590 if (alt != tp1->whoTo) {
3591 /* yes, there is an alternate. */
3592 sctp_free_remote_addr(tp1->whoTo);
3593 /* sa_ignore FREED_MEMORY */
3595 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP (RFC 3758): walk the front of the sent_queue and try to move
 * asoc->advanced_peer_ack_point forward over chunks that are marked
 * SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED.  RESEND-marked
 * PR-SCTP chunks whose time-to-live has expired are released here so
 * the ack point can pass them too.  Presumably returns the last chunk
 * the ack point advanced over (a_adv) for the caller's FWD-TSN logic —
 * the return statement itself is outside the visible span; verify.
 * NOTE(review): this region of the file appears to have lines elided
 * (several closing braces / early returns are not visible).
 */
struct sctp_tmit_chunk *
3601 struct sctp_tmit_chunk *
3602 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3603 struct sctp_association *asoc)
3605 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3609 if (asoc->prsctp_supported == 0) {
3612 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3613 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3614 tp1->sent != SCTP_DATAGRAM_RESEND &&
3615 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3616 /* no chance to advance, out of here */
3619 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3620 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3621 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3622 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3623 asoc->advanced_peer_ack_point,
3624 tp1->rec.data.tsn, 0, 0);
3627 if (!PR_SCTP_ENABLED(tp1->flags)) {
3629 * We can't fwd-tsn past any that are reliable aka
3630 * retransmitted until the asoc fails.
/* Wall-clock "now" used below for the per-chunk TTL expiry test. */
3635 (void)SCTP_GETTIME_TIMEVAL(&now);
3639 * now we got a chunk which is marked for another
3640 * retransmission to a PR-stream but has run out its chances
3641 * already maybe OR has been marked to skip now. Can we skip
3642 * it if its a resend?
3644 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3645 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3647 * Now is this one marked for resend and its time is
3650 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3651 /* Yes so drop it */
3653 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3654 1, SCTP_SO_NOT_LOCKED);
3658 * No, we are done when hit one for resend
3659 * whos time as not expired.
3665 * Ok now if this chunk is marked to drop it we can clean up
3666 * the chunk, advance our peer ack point and we can check
3669 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3670 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3671 /* advance PeerAckPoint goes forward */
3672 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3673 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3675 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3676 /* No update but we do save the chk */
3681 * If it is still in RESEND we can advance no
/*
 * Flight-size audit: re-count the sent_queue by 'sent' state
 * (inflight / resend / in-between / acked / above-acked) and compare
 * against the association's flight counters captured at entry
 * (entry_flight/entry_cnt).  Used by the SACK paths to detect
 * accounting drift when the counters say data is in flight but the
 * queue disagrees.  NOTE(review): return-type line and several branch
 * bodies (including the counter increments and the final return) are
 * not visible in this span; the panic() is presumably under a debug
 * #ifdef (INVARIANTS-style) — confirm against the full file.
 */
3691 sctp_fs_audit(struct sctp_association *asoc)
3693 struct sctp_tmit_chunk *chk;
3694 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3697 int entry_flight, entry_cnt;
/* Snapshot the counters before recounting so we can report both. */
3702 entry_flight = asoc->total_flight;
3703 entry_cnt = asoc->total_flight_count;
/* All-PR-SCTP queues can't be audited meaningfully; bail early. */
3705 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3708 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3709 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3710 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3715 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3717 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3719 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in flight (or in between) is a mismatch. */
3726 if ((inflight > 0) || (inbetween > 0)) {
3728 panic("Flight size-express incorrect? \n");
3730 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3731 entry_flight, entry_cnt);
3733 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3734 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag and, unless it has already been acked/skipped or
 * its data is gone (in which case we only log and leave it alone),
 * remove it from the flight-size accounting and re-mark it
 * SCTP_DATAGRAM_RESEND so it goes back onto the retransmit path.
 * NOTE(review): the return type line and some closing braces are
 * elided in this span; the early-out after the skip-log is presumably
 * a return — confirm against the full file.
 */
3743 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3744 struct sctp_association *asoc,
3745 struct sctp_tmit_chunk *tp1)
3747 tp1->window_probe = 0;
3748 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3749 /* TSN's skipped we do NOT move back. */
3750 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3751 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3753 (uint32_t) (uintptr_t) tp1->whoTo,
3757 /* First setup this by shrinking flight */
3758 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3759 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3762 sctp_flight_size_decrease(tp1);
3763 sctp_total_flight_decrease(stcb, tp1);
3764 /* Now mark for resend */
3765 tp1->sent = SCTP_DATAGRAM_RESEND;
3766 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3769 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3770 tp1->whoTo->flight_size,
3772 (uint32_t) (uintptr_t) tp1->whoTo,
/*
 * Express (fast-path) SACK handling: used when the SACK carries no
 * gap-ack blocks, so only the cumulative ack and the advertised rwnd
 * need processing.  Steps visible in this span: sanity-check cumack
 * against the highest TSN sent (abort on protocol violation), free
 * newly cum-acked chunks and update per-net CC/RTO state, recompute
 * the peer's rwnd, restart/stop T3 timers, run PR-SCTP FWD-TSN
 * advancement, and drive shutdown when the queues drain.
 * Caller must hold the TCB lock (asserted below).
 * NOTE(review): this span has elided lines (some else-arms, brace
 * closings, and call-argument tails are missing); comments below only
 * describe what the visible code shows.
 */
3778 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3779 uint32_t rwnd, int *abort_now, int ecne_seen)
3781 struct sctp_nets *net;
3782 struct sctp_association *asoc;
3783 struct sctp_tmit_chunk *tp1, *tp2;
3785 int win_probe_recovery = 0;
3786 int win_probe_recovered = 0;
3787 int j, done_once = 0;
3791 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3792 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3793 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3795 SCTP_TCB_LOCK_ASSERT(stcb);
3796 #ifdef SCTP_ASOCLOG_OF_TSNS
3797 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3798 stcb->asoc.cumack_log_at++;
3799 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3800 stcb->asoc.cumack_log_at = 0;
/* Remember the old rwnd so a window opening can be detected below. */
3804 old_rwnd = asoc->peers_rwnd;
/* Old (behind) SACK: nothing to ack; duplicate cumack is only a
 * window update. */
3805 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3808 } else if (asoc->last_acked_seq == cumack) {
3809 /* Window update sack */
3810 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3811 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3812 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3813 /* SWS sender side engages */
3814 asoc->peers_rwnd = 0;
3816 if (asoc->peers_rwnd > old_rwnd) {
3821 /* First setup for CC stuff */
3822 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3823 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3824 /* Drag along the window_tsn for cwr's */
3825 net->cwr_window_tsn = cumack;
3827 net->prev_cwnd = net->cwnd;
3832 * CMT: Reset CUC and Fast recovery algo variables before
3835 net->new_pseudo_cumack = 0;
3836 net->will_exit_fast_recovery = 0;
3837 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3838 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Compute the first not-yet-sent TSN (send_s); a cumack at or above
 * it acks data we never sent -> protocol violation, abort. */
3841 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3842 tp1 = TAILQ_LAST(&asoc->sent_queue,
3843 sctpchunk_listhead);
3844 send_s = tp1->rec.data.tsn + 1;
3846 send_s = asoc->sending_seq;
3848 if (SCTP_TSN_GE(cumack, send_s)) {
3849 struct mbuf *op_err;
3850 char msg[SCTP_DIAG_INFO_LEN];
3854 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3856 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3857 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3858 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3861 asoc->this_sack_highest_gap = cumack;
3862 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3863 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3864 stcb->asoc.overall_error_count,
3866 SCTP_FROM_SCTP_INDATA,
/* A valid SACK resets the association error counter. */
3869 stcb->asoc.overall_error_count = 0;
3870 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3871 /* process the new consecutive TSN first */
3872 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3873 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3874 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3875 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3877 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3879 * If it is less than ACKED, it is
3880 * now no-longer in flight. Higher
3881 * values may occur during marking
3883 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3884 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3885 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3886 tp1->whoTo->flight_size,
3888 (uint32_t) (uintptr_t) tp1->whoTo,
3891 sctp_flight_size_decrease(tp1);
3892 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3893 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3896 /* sa_ignore NO_NULL_CHK */
3897 sctp_total_flight_decrease(stcb, tp1);
3899 tp1->whoTo->net_ack += tp1->send_size;
3900 if (tp1->snd_count < 2) {
3902 * True non-retransmited
3905 tp1->whoTo->net_ack2 +=
3908 /* update RTO too? */
3916 sctp_calculate_rto(stcb,
3918 &tp1->sent_rcv_time,
3919 sctp_align_safe_nocopy,
3920 SCTP_RTT_FROM_DATA);
3923 if (tp1->whoTo->rto_needed == 0) {
3924 tp1->whoTo->rto_needed = 1;
3930 * CMT: CUCv2 algorithm. From the
3931 * cumack'd TSNs, for each TSN being
3932 * acked for the first time, set the
3933 * following variables for the
3934 * corresp destination.
3935 * new_pseudo_cumack will trigger a
3937 * find_(rtx_)pseudo_cumack will
3938 * trigger search for the next
3939 * expected (rtx-)pseudo-cumack.
3941 tp1->whoTo->new_pseudo_cumack = 1;
3942 tp1->whoTo->find_pseudo_cumack = 1;
3943 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3945 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3946 /* sa_ignore NO_NULL_CHK */
3947 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3950 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3951 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3953 if (tp1->rec.data.chunk_was_revoked) {
3954 /* deflate the cwnd */
3955 tp1->whoTo->cwnd -= tp1->book_size;
3956 tp1->rec.data.chunk_was_revoked = 0;
/* NR-acked chunks were already taken off the per-stream count. */
3958 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3959 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3960 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
3963 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
/* Last chunk of a stream pending reset: arm the reset trigger. */
3967 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3968 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3969 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
3970 asoc->trigger_reset = 1;
3972 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3974 /* sa_ignore NO_NULL_CHK */
3975 sctp_free_bufspace(stcb, asoc, tp1, 1);
3976 sctp_m_freem(tp1->data);
3979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3980 sctp_log_sack(asoc->last_acked_seq,
3985 SCTP_LOG_FREE_SENT);
3987 asoc->sent_queue_cnt--;
3988 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Freed send-side space: wake any writer blocked on so_snd.  The
 * Apple/lock-testing path drops the TCB lock to take the socket
 * lock, so it must re-check for a concurrently-closed socket. */
3995 /* sa_ignore NO_NULL_CHK */
3996 if (stcb->sctp_socket) {
3997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4001 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4002 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4003 /* sa_ignore NO_NULL_CHK */
4004 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4006 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4007 so = SCTP_INP_SO(stcb->sctp_ep);
4008 atomic_add_int(&stcb->asoc.refcnt, 1);
4009 SCTP_TCB_UNLOCK(stcb);
4010 SCTP_SOCKET_LOCK(so, 1);
4011 SCTP_TCB_LOCK(stcb);
4012 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4013 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4014 /* assoc was freed while we were unlocked */
4015 SCTP_SOCKET_UNLOCK(so, 1);
4019 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4020 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4021 SCTP_SOCKET_UNLOCK(so, 1);
4024 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4025 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4029 /* JRS - Use the congestion control given in the CC module */
4030 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4031 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4032 if (net->net_ack2 > 0) {
4034 * Karn's rule applies to clearing error
4035 * count, this is optional.
4037 net->error_count = 0;
4038 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4039 /* addr came good */
4040 net->dest_state |= SCTP_ADDR_REACHABLE;
4041 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4042 0, (void *)net, SCTP_SO_NOT_LOCKED);
4044 if (net == stcb->asoc.primary_destination) {
4045 if (stcb->asoc.alternate) {
4046 /* release the alternate,
4047 * primary is good */
4048 sctp_free_remote_addr(stcb->asoc.alternate);
4049 stcb->asoc.alternate = NULL;
/* Path leaving potentially-failed (PF) state: restart HB timer and
 * let the CC module restore the cwnd. */
4052 if (net->dest_state & SCTP_ADDR_PF) {
4053 net->dest_state &= ~SCTP_ADDR_PF;
4054 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4055 stcb->sctp_ep, stcb, net,
4056 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4057 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4058 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4059 /* Done with this net */
4062 /* restore any doubled timers */
4063 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4064 if (net->RTO < stcb->asoc.minrto) {
4065 net->RTO = stcb->asoc.minrto;
4067 if (net->RTO > stcb->asoc.maxrto) {
4068 net->RTO = stcb->asoc.maxrto;
4072 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4074 asoc->last_acked_seq = cumack;
4076 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4077 /* nothing left in-flight */
4078 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4079 net->flight_size = 0;
4080 net->partial_bytes_acked = 0;
4082 asoc->total_flight = 0;
4083 asoc->total_flight_count = 0;
/* RFC 4960: rwnd = advertised rwnd minus what is still in flight
 * (plus per-chunk overhead); clamp to 0 under sender-side SWS. */
4086 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4087 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4088 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4089 /* SWS sender side engages */
4090 asoc->peers_rwnd = 0;
4092 if (asoc->peers_rwnd > old_rwnd) {
4093 win_probe_recovery = 1;
4095 /* Now assure a timer where data is queued at */
4098 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4101 if (win_probe_recovery && (net->window_probe)) {
4102 win_probe_recovered = 1;
4104 * Find first chunk that was used with window probe
4105 * and clear the sent
4107 /* sa_ignore FREED_MEMORY */
4108 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4109 if (tp1->window_probe) {
4110 /* move back to data send queue */
4111 sctp_window_probe_recovery(stcb, asoc, tp1);
4116 if (net->RTO == 0) {
4117 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4119 to_ticks = MSEC_TO_TICKS(net->RTO);
4121 if (net->flight_size) {
4123 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4124 sctp_timeout_handler, &net->rxt_timer);
4125 if (net->window_probe) {
4126 net->window_probe = 0;
4129 if (net->window_probe) {
4130 /* In window probes we must assure a timer
4131 * is still running there */
4132 net->window_probe = 0;
4133 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4134 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4135 sctp_timeout_handler, &net->rxt_timer);
4137 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4138 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4140 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight accounting says empty but the queue is not: audit and, if
 * the audit confirms drift, rebuild the counters from the queue. */
4145 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4146 (asoc->sent_queue_retran_cnt == 0) &&
4147 (win_probe_recovered == 0) &&
4150 * huh, this should not happen unless all packets are
4151 * PR-SCTP and marked to skip of course.
4153 if (sctp_fs_audit(asoc)) {
4154 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4155 net->flight_size = 0;
4157 asoc->total_flight = 0;
4158 asoc->total_flight_count = 0;
4159 asoc->sent_queue_retran_cnt = 0;
4160 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4161 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4162 sctp_flight_size_increase(tp1);
4163 sctp_total_flight_increase(stcb, tp1);
4164 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4165 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4172 /**********************************/
4173 /* Now what about shutdown issues */
4174 /**********************************/
4175 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4176 /* nothing left on sendqueue.. consider done */
4178 if ((asoc->stream_queue_cnt == 1) &&
4179 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4180 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4181 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4182 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4184 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4185 (asoc->stream_queue_cnt == 0)) {
4186 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4187 /* Need to abort here */
4188 struct mbuf *op_err;
4193 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4194 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4195 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4198 struct sctp_nets *netp;
4200 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4201 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4202 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4204 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4205 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4206 sctp_stop_timers_for_shutdown(stcb);
4207 if (asoc->alternate) {
4208 netp = asoc->alternate;
4210 netp = asoc->primary_destination;
4212 sctp_send_shutdown(stcb, netp);
4213 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4214 stcb->sctp_ep, stcb, netp);
4215 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4216 stcb->sctp_ep, stcb, netp);
4218 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4219 (asoc->stream_queue_cnt == 0)) {
4220 struct sctp_nets *netp;
4222 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4225 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4226 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4227 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4228 sctp_stop_timers_for_shutdown(stcb);
4229 if (asoc->alternate) {
4230 netp = asoc->alternate;
4232 netp = asoc->primary_destination;
4234 sctp_send_shutdown_ack(stcb, netp);
4235 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4236 stcb->sctp_ep, stcb, netp);
4239 /*********************************************/
4240 /* Here we perform PR-SCTP procedures */
4242 /*********************************************/
4243 /* C1. update advancedPeerAckPoint */
4244 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4245 asoc->advanced_peer_ack_point = cumack;
4247 /* PR-Sctp issues need to be addressed too */
4248 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4249 struct sctp_tmit_chunk *lchk;
4250 uint32_t old_adv_peer_ack_point;
4252 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4253 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4254 /* C3. See if we need to send a Fwd-TSN */
4255 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4257 * ISSUE with ECN, see FWD-TSN processing.
4259 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4260 send_forward_tsn(stcb, asoc);
4262 /* try to FR fwd-tsn's that get lost too */
4263 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4264 send_forward_tsn(stcb, asoc);
4269 /* Assure a timer is up */
4270 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4271 stcb->sctp_ep, stcb, lchk->whoTo);
4274 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4275 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4277 stcb->asoc.peers_rwnd,
4278 stcb->asoc.total_flight,
4279 stcb->asoc.total_output_queue_size);
4284 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4285 struct sctp_tcb *stcb,
4286 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4287 int *abort_now, uint8_t flags,
4288 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4290 struct sctp_association *asoc;
4291 struct sctp_tmit_chunk *tp1, *tp2;
4292 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4293 uint16_t wake_him = 0;
4294 uint32_t send_s = 0;
4296 int accum_moved = 0;
4297 int will_exit_fast_recovery = 0;
4298 uint32_t a_rwnd, old_rwnd;
4299 int win_probe_recovery = 0;
4300 int win_probe_recovered = 0;
4301 struct sctp_nets *net = NULL;
4304 uint8_t reneged_all = 0;
4305 uint8_t cmt_dac_flag;
4308 * we take any chance we can to service our queues since we cannot
4309 * get awoken when the socket is read from :<
4312 * Now perform the actual SACK handling: 1) Verify that it is not an
4313 * old sack, if so discard. 2) If there is nothing left in the send
4314 * queue (cum-ack is equal to last acked) then you have a duplicate
4315 * too, update any rwnd change and verify no timers are running.
4316 * then return. 3) Process any new consequtive data i.e. cum-ack
4317 * moved process these first and note that it moved. 4) Process any
4318 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4319 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4320 * sync up flightsizes and things, stop all timers and also check
4321 * for shutdown_pending state. If so then go ahead and send off the
4322 * shutdown. If in shutdown recv, send off the shutdown-ack and
4323 * start that timer, Ret. 9) Strike any non-acked things and do FR
4324 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4325 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4326 * if in shutdown_recv state.
4328 SCTP_TCB_LOCK_ASSERT(stcb);
4330 this_sack_lowest_newack = 0;
4331 SCTP_STAT_INCR(sctps_slowpath_sack);
4333 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4334 #ifdef SCTP_ASOCLOG_OF_TSNS
4335 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4336 stcb->asoc.cumack_log_at++;
4337 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4338 stcb->asoc.cumack_log_at = 0;
4343 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4344 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4345 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4347 old_rwnd = stcb->asoc.peers_rwnd;
4348 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4349 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4350 stcb->asoc.overall_error_count,
4352 SCTP_FROM_SCTP_INDATA,
4355 stcb->asoc.overall_error_count = 0;
4357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4358 sctp_log_sack(asoc->last_acked_seq,
4365 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4367 uint32_t *dupdata, dblock;
4369 for (i = 0; i < num_dup; i++) {
4370 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4371 sizeof(uint32_t), (uint8_t *) & dblock);
4372 if (dupdata == NULL) {
4375 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4379 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4380 tp1 = TAILQ_LAST(&asoc->sent_queue,
4381 sctpchunk_listhead);
4382 send_s = tp1->rec.data.tsn + 1;
4385 send_s = asoc->sending_seq;
4387 if (SCTP_TSN_GE(cum_ack, send_s)) {
4388 struct mbuf *op_err;
4389 char msg[SCTP_DIAG_INFO_LEN];
4392 * no way, we have not even sent this TSN out yet. Peer is
4393 * hopelessly messed up with us.
4395 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4398 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4399 tp1->rec.data.tsn, (void *)tp1);
4404 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4406 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4407 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4408 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4411 /**********************/
4412 /* 1) check the range */
4413 /**********************/
4414 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4415 /* acking something behind */
4418 /* update the Rwnd of the peer */
4419 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4420 TAILQ_EMPTY(&asoc->send_queue) &&
4421 (asoc->stream_queue_cnt == 0)) {
4422 /* nothing left on send/sent and strmq */
4423 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4424 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4425 asoc->peers_rwnd, 0, 0, a_rwnd);
4427 asoc->peers_rwnd = a_rwnd;
4428 if (asoc->sent_queue_retran_cnt) {
4429 asoc->sent_queue_retran_cnt = 0;
4431 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4432 /* SWS sender side engages */
4433 asoc->peers_rwnd = 0;
4435 /* stop any timers */
4436 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4437 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4438 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4439 net->partial_bytes_acked = 0;
4440 net->flight_size = 0;
4442 asoc->total_flight = 0;
4443 asoc->total_flight_count = 0;
4447 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4448 * things. The total byte count acked is tracked in netAckSz AND
4449 * netAck2 is used to track the total bytes acked that are un-
4450 * amibguious and were never retransmitted. We track these on a per
4451 * destination address basis.
4453 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4454 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4455 /* Drag along the window_tsn for cwr's */
4456 net->cwr_window_tsn = cum_ack;
4458 net->prev_cwnd = net->cwnd;
4463 * CMT: Reset CUC and Fast recovery algo variables before
4466 net->new_pseudo_cumack = 0;
4467 net->will_exit_fast_recovery = 0;
4468 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4469 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4472 /* process the new consecutive TSN first */
4473 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4474 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4475 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4477 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4479 * If it is less than ACKED, it is
4480 * now no-longer in flight. Higher
4481 * values may occur during marking
4483 if ((tp1->whoTo->dest_state &
4484 SCTP_ADDR_UNCONFIRMED) &&
4485 (tp1->snd_count < 2)) {
4487 * If there was no retran
4488 * and the address is
4489 * un-confirmed and we sent
4491 * sacked.. its confirmed,
4494 tp1->whoTo->dest_state &=
4495 ~SCTP_ADDR_UNCONFIRMED;
4497 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4498 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4499 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4500 tp1->whoTo->flight_size,
4502 (uint32_t) (uintptr_t) tp1->whoTo,
4505 sctp_flight_size_decrease(tp1);
4506 sctp_total_flight_decrease(stcb, tp1);
4507 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4508 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4512 tp1->whoTo->net_ack += tp1->send_size;
4514 /* CMT SFR and DAC algos */
4515 this_sack_lowest_newack = tp1->rec.data.tsn;
4516 tp1->whoTo->saw_newack = 1;
4518 if (tp1->snd_count < 2) {
4520 * True non-retransmited
4523 tp1->whoTo->net_ack2 +=
4526 /* update RTO too? */
4530 sctp_calculate_rto(stcb,
4532 &tp1->sent_rcv_time,
4533 sctp_align_safe_nocopy,
4534 SCTP_RTT_FROM_DATA);
4537 if (tp1->whoTo->rto_needed == 0) {
4538 tp1->whoTo->rto_needed = 1;
4544 * CMT: CUCv2 algorithm. From the
4545 * cumack'd TSNs, for each TSN being
4546 * acked for the first time, set the
4547 * following variables for the
4548 * corresp destination.
4549 * new_pseudo_cumack will trigger a
4551 * find_(rtx_)pseudo_cumack will
4552 * trigger search for the next
4553 * expected (rtx-)pseudo-cumack.
4555 tp1->whoTo->new_pseudo_cumack = 1;
4556 tp1->whoTo->find_pseudo_cumack = 1;
4557 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4560 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4561 sctp_log_sack(asoc->last_acked_seq,
4566 SCTP_LOG_TSN_ACKED);
4568 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4569 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4572 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4573 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4574 #ifdef SCTP_AUDITING_ENABLED
4575 sctp_audit_log(0xB3,
4576 (asoc->sent_queue_retran_cnt & 0x000000ff));
4579 if (tp1->rec.data.chunk_was_revoked) {
4580 /* deflate the cwnd */
4581 tp1->whoTo->cwnd -= tp1->book_size;
4582 tp1->rec.data.chunk_was_revoked = 0;
4584 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4585 tp1->sent = SCTP_DATAGRAM_ACKED;
4592 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4593 /* always set this up to cum-ack */
4594 asoc->this_sack_highest_gap = last_tsn;
4596 if ((num_seg > 0) || (num_nr_seg > 0)) {
4599 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4600 * to be greater than the cumack. Also reset saw_newack to 0
4603 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4604 net->saw_newack = 0;
4605 net->this_sack_highest_newack = last_tsn;
4609 * thisSackHighestGap will increase while handling NEW
4610 * segments this_sack_highest_newack will increase while
4611 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4612 * used for CMT DAC algo. saw_newack will also change.
4614 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4615 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4616 num_seg, num_nr_seg, &rto_ok)) {
4620 * validate the biggest_tsn_acked in the gap acks if strict
4621 * adherence is wanted.
4623 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4625 * peer is either confused or we are under attack.
4628 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4629 biggest_tsn_acked, send_s);
4633 /*******************************************/
4634 /* cancel ALL T3-send timer if accum moved */
4635 /*******************************************/
4636 if (asoc->sctp_cmt_on_off > 0) {
4637 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4638 if (net->new_pseudo_cumack)
4639 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4641 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4646 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4647 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4648 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4652 /********************************************/
4653 /* drop the acked chunks from the sentqueue */
4654 /********************************************/
4655 asoc->last_acked_seq = cum_ack;
4657 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4658 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4661 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4662 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4663 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4666 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4670 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4671 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4672 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4673 asoc->trigger_reset = 1;
4675 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4676 if (PR_SCTP_ENABLED(tp1->flags)) {
4677 if (asoc->pr_sctp_cnt != 0)
4678 asoc->pr_sctp_cnt--;
4680 asoc->sent_queue_cnt--;
4682 /* sa_ignore NO_NULL_CHK */
4683 sctp_free_bufspace(stcb, asoc, tp1, 1);
4684 sctp_m_freem(tp1->data);
4686 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4687 asoc->sent_queue_cnt_removeable--;
4690 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4691 sctp_log_sack(asoc->last_acked_seq,
4696 SCTP_LOG_FREE_SENT);
4698 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4701 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4703 panic("Warning flight size is positive and should be 0");
4705 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4706 asoc->total_flight);
4708 asoc->total_flight = 0;
4710 /* sa_ignore NO_NULL_CHK */
4711 if ((wake_him) && (stcb->sctp_socket)) {
4712 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4716 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4718 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4721 so = SCTP_INP_SO(stcb->sctp_ep);
4722 atomic_add_int(&stcb->asoc.refcnt, 1);
4723 SCTP_TCB_UNLOCK(stcb);
4724 SCTP_SOCKET_LOCK(so, 1);
4725 SCTP_TCB_LOCK(stcb);
4726 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4727 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4728 /* assoc was freed while we were unlocked */
4729 SCTP_SOCKET_UNLOCK(so, 1);
4733 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4734 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4735 SCTP_SOCKET_UNLOCK(so, 1);
4738 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4739 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4743 if (asoc->fast_retran_loss_recovery && accum_moved) {
4744 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4745 /* Setup so we will exit RFC2582 fast recovery */
4746 will_exit_fast_recovery = 1;
4750 * Check for revoked fragments:
4752 * if Previous sack - Had no frags then we can't have any revoked if
4753 * Previous sack - Had frag's then - If we now have frags aka
4754 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4755 * some of them. else - The peer revoked all ACKED fragments, since
4756 * we had some before and now we have NONE.
4760 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4761 asoc->saw_sack_with_frags = 1;
4762 } else if (asoc->saw_sack_with_frags) {
4763 int cnt_revoked = 0;
4765 /* Peer revoked all dg's marked or acked */
4766 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4767 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4768 tp1->sent = SCTP_DATAGRAM_SENT;
4769 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4770 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4771 tp1->whoTo->flight_size,
4773 (uint32_t) (uintptr_t) tp1->whoTo,
4776 sctp_flight_size_increase(tp1);
4777 sctp_total_flight_increase(stcb, tp1);
4778 tp1->rec.data.chunk_was_revoked = 1;
4780 * To ensure that this increase in
4781 * flightsize, which is artificial, does not
4782 * throttle the sender, we also increase the
4783 * cwnd artificially.
4785 tp1->whoTo->cwnd += tp1->book_size;
4792 asoc->saw_sack_with_frags = 0;
4795 asoc->saw_sack_with_nr_frags = 1;
4797 asoc->saw_sack_with_nr_frags = 0;
4799 /* JRS - Use the congestion control given in the CC module */
4800 if (ecne_seen == 0) {
4801 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4802 if (net->net_ack2 > 0) {
4804 * Karn's rule applies to clearing error
4805 * count, this is optional.
4807 net->error_count = 0;
4808 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4809 /* addr came good */
4810 net->dest_state |= SCTP_ADDR_REACHABLE;
4811 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4812 0, (void *)net, SCTP_SO_NOT_LOCKED);
4814 if (net == stcb->asoc.primary_destination) {
4815 if (stcb->asoc.alternate) {
4816 /* release the alternate,
4817 * primary is good */
4818 sctp_free_remote_addr(stcb->asoc.alternate);
4819 stcb->asoc.alternate = NULL;
4822 if (net->dest_state & SCTP_ADDR_PF) {
4823 net->dest_state &= ~SCTP_ADDR_PF;
4824 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4825 stcb->sctp_ep, stcb, net,
4826 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4827 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4828 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4829 /* Done with this net */
4832 /* restore any doubled timers */
4833 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4834 if (net->RTO < stcb->asoc.minrto) {
4835 net->RTO = stcb->asoc.minrto;
4837 if (net->RTO > stcb->asoc.maxrto) {
4838 net->RTO = stcb->asoc.maxrto;
4842 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4844 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4845 /* nothing left in-flight */
4846 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4847 /* stop all timers */
4848 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4850 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4851 net->flight_size = 0;
4852 net->partial_bytes_acked = 0;
4854 asoc->total_flight = 0;
4855 asoc->total_flight_count = 0;
4857 /**********************************/
4858 /* Now what about shutdown issues */
4859 /**********************************/
4860 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4861 /* nothing left on sendqueue.. consider done */
4862 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4863 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4864 asoc->peers_rwnd, 0, 0, a_rwnd);
4866 asoc->peers_rwnd = a_rwnd;
4867 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4868 /* SWS sender side engages */
4869 asoc->peers_rwnd = 0;
4872 if ((asoc->stream_queue_cnt == 1) &&
4873 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4874 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4875 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4876 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4878 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4879 (asoc->stream_queue_cnt == 0)) {
4880 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4881 /* Need to abort here */
4882 struct mbuf *op_err;
4887 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4888 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4889 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4892 struct sctp_nets *netp;
4894 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4895 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4896 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4898 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4899 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4900 sctp_stop_timers_for_shutdown(stcb);
4901 if (asoc->alternate) {
4902 netp = asoc->alternate;
4904 netp = asoc->primary_destination;
4906 sctp_send_shutdown(stcb, netp);
4907 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4908 stcb->sctp_ep, stcb, netp);
4909 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4910 stcb->sctp_ep, stcb, netp);
4913 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4914 (asoc->stream_queue_cnt == 0)) {
4915 struct sctp_nets *netp;
4917 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4920 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4921 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4922 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4923 sctp_stop_timers_for_shutdown(stcb);
4924 if (asoc->alternate) {
4925 netp = asoc->alternate;
4927 netp = asoc->primary_destination;
4929 sctp_send_shutdown_ack(stcb, netp);
4930 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4931 stcb->sctp_ep, stcb, netp);
4936 * Now here we are going to recycle net_ack for a different use...
4939 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4944 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4945 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4946 * automatically ensure that.
4948 if ((asoc->sctp_cmt_on_off > 0) &&
4949 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4950 (cmt_dac_flag == 0)) {
4951 this_sack_lowest_newack = cum_ack;
4953 if ((num_seg > 0) || (num_nr_seg > 0)) {
4954 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4955 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4957 /* JRS - Use the congestion control given in the CC module */
4958 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4960 /* Now are we exiting loss recovery ? */
4961 if (will_exit_fast_recovery) {
4962 /* Ok, we must exit fast recovery */
4963 asoc->fast_retran_loss_recovery = 0;
4965 if ((asoc->sat_t3_loss_recovery) &&
4966 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4967 /* end satellite t3 loss recovery */
4968 asoc->sat_t3_loss_recovery = 0;
4973 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4974 if (net->will_exit_fast_recovery) {
4975 /* Ok, we must exit fast recovery */
4976 net->fast_retran_loss_recovery = 0;
4980 /* Adjust and set the new rwnd value */
4981 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4982 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4983 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4985 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4986 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4987 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4988 /* SWS sender side engages */
4989 asoc->peers_rwnd = 0;
4991 if (asoc->peers_rwnd > old_rwnd) {
4992 win_probe_recovery = 1;
4995 * Now we must setup so we have a timer up for anyone with
5001 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5002 if (win_probe_recovery && (net->window_probe)) {
5003 win_probe_recovered = 1;
5005 * Find first chunk that was used with
5006 * window probe and clear the event. Put
5007 * it back into the send queue as if has
5010 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5011 if (tp1->window_probe) {
5012 sctp_window_probe_recovery(stcb, asoc, tp1);
5017 if (net->flight_size) {
5019 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5020 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5021 stcb->sctp_ep, stcb, net);
5023 if (net->window_probe) {
5024 net->window_probe = 0;
5027 if (net->window_probe) {
5028 /* In window probes we must assure a timer
5029 * is still running there */
5030 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5031 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5032 stcb->sctp_ep, stcb, net);
5035 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5036 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5038 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5043 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5044 (asoc->sent_queue_retran_cnt == 0) &&
5045 (win_probe_recovered == 0) &&
5048 * huh, this should not happen unless all packets are
5049 * PR-SCTP and marked to skip of course.
5051 if (sctp_fs_audit(asoc)) {
5052 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5053 net->flight_size = 0;
5055 asoc->total_flight = 0;
5056 asoc->total_flight_count = 0;
5057 asoc->sent_queue_retran_cnt = 0;
5058 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5059 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5060 sctp_flight_size_increase(tp1);
5061 sctp_total_flight_increase(stcb, tp1);
5062 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5063 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5070 /*********************************************/
5071 /* Here we perform PR-SCTP procedures */
5073 /*********************************************/
5074 /* C1. update advancedPeerAckPoint */
5075 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5076 asoc->advanced_peer_ack_point = cum_ack;
5078 /* C2. try to further move advancedPeerAckPoint ahead */
5079 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5080 struct sctp_tmit_chunk *lchk;
5081 uint32_t old_adv_peer_ack_point;
5083 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5084 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5085 /* C3. See if we need to send a Fwd-TSN */
5086 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5088 * ISSUE with ECN, see FWD-TSN processing.
5090 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5091 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5092 0xee, cum_ack, asoc->advanced_peer_ack_point,
5093 old_adv_peer_ack_point);
5095 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5096 send_forward_tsn(stcb, asoc);
5098 /* try to FR fwd-tsn's that get lost too */
5099 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5100 send_forward_tsn(stcb, asoc);
5105 /* Assure a timer is up */
5106 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5107 stcb->sctp_ep, stcb, lchk->whoTo);
5110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5111 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5113 stcb->asoc.peers_rwnd,
5114 stcb->asoc.total_flight,
5115 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a received SHUTDOWN chunk.
 * The peer's advertised window must not appear to change here, so a_rwnd
 * is reconstructed as (current peers_rwnd + total_flight) before handing
 * off to the express SACK path (with zero gap/dup reports).
 */
5120 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5123 uint32_t cum_ack, a_rwnd;
5125 cum_ack = ntohl(cp->cumulative_tsn_ack);
5126 /* Arrange so a_rwnd does NOT change */
5127 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5129 /* Now call the express sack handling */
5130 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * After a FORWARD-TSN has advanced strmin->last_mid_delivered, flush the
 * stream's in-order queue in two passes:
 *   1) deliver every queued message with MID <= last_mid_delivered
 *      (they are now "deliverable" regardless of gaps);
 *   2) continue normal in-sequence delivery from last_mid_delivered + 1.
 * A partially reassembled message at the head sets need_reasm_check so
 * sctp_deliver_reasm_check() can try to complete/deliver it.
 * Caller holds the read lock (SCTP_READ_LOCK_HELD is passed through).
 */
5134 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5135 struct sctp_stream_in *strmin)
5137 struct sctp_queued_to_read *ctl, *nctl;
5138 struct sctp_association *asoc;
5140 int need_reasm_check = 0;
5143 mid = strmin->last_mid_delivered;
/* Pass 1: everything at or before the advanced delivery point. */
5145 * First deliver anything prior to and including the stream no that
5148 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5149 if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
5150 /* this is deliverable now */
5151 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unfragmented: unlink from whichever stream queue holds it. */
5152 if (ctl->on_strm_q) {
5153 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5154 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5155 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5156 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5159 panic("strmin: %p ctl: %p unknown %d",
5160 strmin, ctl, ctl->on_strm_q);
5165 /* subtract pending on streams */
5166 asoc->size_on_all_streams -= ctl->length;
5167 sctp_ucount_decr(asoc->cnt_on_all_streams);
5168 /* deliver it to at least the delivery-q */
5169 if (stcb->sctp_socket) {
5170 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5171 sctp_add_to_readq(stcb->sctp_ep, stcb,
5173 &stcb->sctp_socket->so_rcv,
5174 1, SCTP_READ_LOCK_HELD,
5175 SCTP_SO_NOT_LOCKED);
5178 /* It's a fragmented message */
5179 if (ctl->first_frag_seen) {
5180 /* Make it so this is next to
5181 * deliver, we restore later */
5182 strmin->last_mid_delivered = ctl->mid - 1;
5183 need_reasm_check = 1;
5188 /* no more delivery now. */
5192 if (need_reasm_check) {
/* Try to complete the partial message; then restore the delivery
 * point unless reassembly actually advanced past it. */
5195 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5196 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5197 /* Restore the next to deliver unless we are ahead */
5198 strmin->last_mid_delivered = mid;
5201 /* Left the front Partial one on */
5204 need_reasm_check = 0;
/* Pass 2: resume strict in-sequence delivery. */
5207 * now we must deliver things in queue the normal way if any are
5210 mid = strmin->last_mid_delivered + 1;
5211 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5212 if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
5213 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5214 /* this is deliverable now */
5215 if (ctl->on_strm_q) {
5216 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5217 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5218 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5219 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5222 panic("strmin: %p ctl: %p unknown %d",
5223 strmin, ctl, ctl->on_strm_q);
5228 /* subtract pending on streams */
5229 asoc->size_on_all_streams -= ctl->length;
5230 sctp_ucount_decr(asoc->cnt_on_all_streams);
5231 /* deliver it to at least the delivery-q */
5232 strmin->last_mid_delivered = ctl->mid;
5233 if (stcb->sctp_socket) {
5234 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5235 sctp_add_to_readq(stcb->sctp_ep, stcb,
5237 &stcb->sctp_socket->so_rcv, 1,
5238 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance the expected MID and keep scanning. */
5241 mid = strmin->last_mid_delivered + 1;
5243 /* It's a fragmented message */
5244 if (ctl->first_frag_seen) {
5245 /* Make it so this is next to
5247 strmin->last_mid_delivered = ctl->mid - 1;
5248 need_reasm_check = 1;
5256 if (need_reasm_check) {
5257 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * Purge the reassembly state for one (stream, mid) entry that a
 * FORWARD-TSN has rendered undeliverable.  Chunks hanging on the entry's
 * reasm list are freed; the entry itself is unlinked from the ordered or
 * unordered stream queue and, if it is not also on the socket read queue,
 * freed.  Special case: without I-DATA support, unordered data carries no
 * usable MID, so only chunks with TSN <= cumtsn are purged, and any
 * leftover newer fragments cause the control to be reset and possibly
 * re-seeded with its FIRST_FRAG (must be newer data for a later message).
 * Caller holds the read lock (SCTP_READ_LOCK_HELD is passed down).
 */
5264 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5265 struct sctp_association *asoc,
5266 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5268 struct sctp_queued_to_read *control;
5269 struct sctp_stream_in *strm;
5270 struct sctp_tmit_chunk *chk, *nchk;
5271 int cnt_removed = 0;
5274 * For now large messages held on the stream reasm that are complete
5275 * will be tossed too. We could in theory do more work to spin
5276 * through and stop after dumping one msg aka seeing the start of a
5277 * new msg at the head, and call the delivery function... to see if
5278 * it can be delivered... But for now we just dump everything on the
5281 strm = &asoc->strmin[stream];
5282 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5283 if (control == NULL) {
/* Pre-I-DATA unordered entry entirely beyond the new cum-tsn: leave it. */
5287 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5290 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5291 /* Purge hanging chunks */
5292 if (!asoc->idata_supported && (ordered == 0)) {
5293 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5298 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5299 asoc->size_on_reasm_queue -= chk->send_size;
5300 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5302 sctp_m_freem(chk->data);
5305 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5307 if (!TAILQ_EMPTY(&control->reasm)) {
5308 /* This has to be old data, unordered */
5309 if (control->data) {
5310 sctp_m_freem(control->data);
5311 control->data = NULL;
/* Re-seed the control from the surviving (newer) fragments and
 * retry delivery — presumably these belong to a later message. */
5313 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5314 chk = TAILQ_FIRST(&control->reasm);
5315 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5316 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5317 sctp_add_chk_to_control(control, strm, stcb, asoc,
5318 chk, SCTP_READ_LOCK_HELD);
5320 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* Fully purged: unlink from its stream queue and release. */
5323 if (control->on_strm_q == SCTP_ON_ORDERED) {
5324 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5325 control->on_strm_q = 0;
5326 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5327 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5328 control->on_strm_q = 0;
5330 } else if (control->on_strm_q) {
5331 panic("strm: %p ctl: %p unknown %d",
5332 strm, control, control->on_strm_q);
5335 control->on_strm_q = 0;
5336 if (control->on_read_q == 0) {
5337 sctp_free_remote_addr(control->whoFrom);
5338 if (control->data) {
5339 sctp_m_freem(control->data);
5340 control->data = NULL;
5342 sctp_free_a_readq(stcb, control);
/*
 * Receiver-side processing of a PR-SCTP FORWARD-TSN (or I-FORWARD-TSN)
 * chunk: advance the local cumulative TSN, update the mapping arrays,
 * flush now-skipped reassembly state, abort any partial delivery in
 * progress for a skipped message, and kick each affected stream's
 * reorder queue.  Validation: under-sized chunks are ignored, stale
 * cum-tsns are ignored, and a cum-tsn beyond the advertised window
 * aborts the association as a protocol violation.
 */
5347 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5348 struct sctp_forward_tsn_chunk *fwd,
5349 int *abort_flag, struct mbuf *m, int offset)
5351 /* The pr-sctp fwd tsn */
5353 * here we will perform all the data receiver side steps for
5354 * processing FwdTSN, as required in by pr-sctp draft:
5356 * Assume we get FwdTSN(x):
5358 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5359 * + others we have 3) examine and update re-ordering queue on
5360 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5361 * report where we are.
5363 struct sctp_association *asoc;
5364 uint32_t new_cum_tsn, gap;
5365 unsigned int i, fwd_sz, m_size;
5367 struct sctp_stream_in *strm;
5368 struct sctp_queued_to_read *ctl, *sv;
/* Reject chunks smaller than the fixed header. */
5371 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5372 SCTPDBG(SCTP_DEBUG_INDATA1,
5373 "Bad size too small/big fwd-tsn\n");
/* m_size = mapping array capacity in TSNs (bytes * 8). */
5376 m_size = (stcb->asoc.mapping_array_size << 3);
5377 /*************************************************************/
5378 /* 1. Here we update local cumTSN and shift the bitmap array */
5379 /*************************************************************/
5380 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5382 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5383 /* Already got there ... */
5387 * now we know the new TSN is more advanced, let's find the actual
5390 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5391 asoc->cumulative_tsn = new_cum_tsn;
5392 if (gap >= m_size) {
/* Jump exceeds the mapping array: either an attack (beyond what
 * our rwnd could ever have allowed) or a legitimate overrun that
 * resets both maps to start just past the new cum-tsn. */
5393 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5394 struct mbuf *op_err;
5395 char msg[SCTP_DIAG_INFO_LEN];
5398 * out of range (of single byte chunks in the rwnd I
5399 * give out). This must be an attacker.
5402 snprintf(msg, sizeof(msg),
5403 "New cum ack %8.8x too high, highest TSN %8.8x",
5404 new_cum_tsn, asoc->highest_tsn_inside_map);
5405 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5406 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5407 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5410 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5412 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5413 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5414 asoc->highest_tsn_inside_map = new_cum_tsn;
5416 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5417 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5419 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5420 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Gap fits: mark every TSN up to the new cum-tsn present in the
 * NR (non-renegable) map. */
5423 SCTP_TCB_LOCK_ASSERT(stcb);
5424 for (i = 0; i <= gap; i++) {
5425 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5426 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5427 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5428 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5429 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5434 /*************************************************************/
5435 /* 2. Clear up re-assembly queue */
5436 /*************************************************************/
5438 /* This is now done as part of clearing up the stream/seq */
5439 if (asoc->idata_supported == 0) {
5442 /* Flush all the un-ordered data based on cum-tsn */
5443 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5444 for (sid = 0; sid < asoc->streamincnt; sid++) {
5445 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5447 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5449 /*******************************************************/
5450 /* 3. Update the PR-stream re-ordering queues and fix */
5451 /* delivery issues as needed. */
5452 /*******************************************************/
5453 fwd_sz -= sizeof(*fwd);
5456 unsigned int num_str;
5457 uint32_t mid, cur_mid;
5459 uint16_t ordered, flags;
5460 struct sctp_strseq *stseq, strseqbuf;
5461 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5463 offset += sizeof(*fwd);
5465 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* The trailer is an array of (sid, ssn) pairs, or (sid, mid, flags)
 * triples when I-DATA is in use. */
5466 if (asoc->idata_supported) {
5467 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5469 num_str = fwd_sz / sizeof(struct sctp_strseq);
5471 for (i = 0; i < num_str; i++) {
5472 if (asoc->idata_supported) {
5473 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5474 sizeof(struct sctp_strseq_mid),
5475 (uint8_t *) & strseqbuf_m);
5476 offset += sizeof(struct sctp_strseq_mid);
5477 if (stseq_m == NULL) {
5480 sid = ntohs(stseq_m->sid);
5481 mid = ntohl(stseq_m->mid);
5482 flags = ntohs(stseq_m->flags);
5483 if (flags & PR_SCTP_UNORDERED_FLAG) {
5489 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5490 sizeof(struct sctp_strseq),
5491 (uint8_t *) & strseqbuf);
5492 offset += sizeof(struct sctp_strseq);
5493 if (stseq == NULL) {
5496 sid = ntohs(stseq->sid);
5497 mid = (uint32_t) ntohs(stseq->ssn);
5505 * Ok we now look for the stream/seq on the read
5506 * queue where its not all delivered. If we find it
5507 * we transmute the read entry into a PDI_ABORTED.
5509 if (sid >= asoc->streamincnt) {
5510 /* screwed up streams, stop! */
5513 if ((asoc->str_of_pdapi == sid) &&
5514 (asoc->ssn_of_pdapi == mid)) {
5516 * If this is the one we were partially
5517 * delivering now then we no longer are.
5518 * Note this will change with the reassembly
5521 asoc->fragmented_delivery_inprogress = 0;
/* Flush reassembly state for every MID up to the skipped one. */
5523 strm = &asoc->strmin[sid];
5524 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5525 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5527 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5528 if ((ctl->sinfo_stream == sid) &&
5529 (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
5530 str_seq = (sid << 16) | (0x0000ffff & mid);
5531 ctl->pdapi_aborted = 1;
/* Save/restore control_pdapi around the notify so the ULP sees
 * this entry as the aborted partial delivery. */
5532 sv = stcb->asoc.control_pdapi;
5534 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5535 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5536 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5537 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5539 } else if (ctl->on_strm_q) {
5540 panic("strm: %p ctl: %p unknown %d",
5541 strm, ctl, ctl->on_strm_q);
5545 stcb->asoc.control_pdapi = ctl;
5546 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5548 SCTP_PARTIAL_DELIVERY_ABORTED,
5550 SCTP_SO_NOT_LOCKED);
5551 stcb->asoc.control_pdapi = sv;
5553 } else if ((ctl->sinfo_stream == sid) &&
5554 SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
5555 /* We are past our victim SSN */
5559 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5560 /* Update the sequence number */
5561 strm->last_mid_delivered = mid;
5563 /* now kick the stream the new way */
5564 /* sa_ignore NO_NULL_CHK */
5565 sctp_kick_prsctp_reorder_queue(stcb, strm);
5567 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5570 * Now slide thing forward.
5572 sctp_slide_mapping_arrays(stcb);