2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
54 * NOTES: On the outbound side of things I need to check the sack timer to
55 * see if I should generate a SACK into the chunk queue (if I have data to
56 * send, that is) and will be sending it ... for bundling.
58 * The callback in sctp_usrreq.c will get called when the socket is read from.
59 * This will cause sctp_service_queues() to get called on the top entry in
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64 struct sctp_stream_in *strm,
65 struct sctp_tcb *stcb,
66 struct sctp_association *asoc,
67 struct sctp_tmit_chunk *chk);
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 /* Calculate what the rwnd would be */
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
83 * This is really set wrong with respect to a 1-2-m socket. Since
84 * the sb_cc is the count that everyone as put up. When we re-write
85 * sctp_soreceive then we will fix this so that ONLY this
86 * associations data is taken into account.
88 if (stcb->sctp_socket == NULL) {
91 if (stcb->asoc.sb_cc == 0 &&
92 asoc->size_on_reasm_queue == 0 &&
93 asoc->size_on_all_streams == 0) {
94 /* Full rwnd granted */
95 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 /* get actual space */
99 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
101 * take out what has NOT been put on socket queue and we yet hold
104 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 asoc->cnt_on_reasm_queue * MSIZE));
106 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 asoc->cnt_on_all_streams * MSIZE));
112 /* what is the overhead of all these rwnd's */
113 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
115 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 * even it is 0. SWS engaged
118 if (calc < stcb->asoc.my_rwnd_control_len) {
127 * Build out our readq entry based on the incoming packet.
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131 struct sctp_nets *net,
132 uint32_t tsn, uint32_t ppid,
133 uint32_t context, uint16_t stream_no,
134 uint32_t stream_seq, uint8_t flags,
137 struct sctp_queued_to_read *read_queue_e = NULL;
139 sctp_alloc_a_readq(stcb, read_queue_e);
140 if (read_queue_e == NULL) {
143 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 read_queue_e->sinfo_stream = stream_no;
145 read_queue_e->sinfo_ssn = stream_seq;
146 read_queue_e->sinfo_flags = (flags << 8);
147 read_queue_e->sinfo_ppid = ppid;
148 read_queue_e->sinfo_context = context;
149 read_queue_e->sinfo_tsn = tsn;
150 read_queue_e->sinfo_cumtsn = tsn;
151 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
152 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 TAILQ_INIT(&read_queue_e->reasm);
154 read_queue_e->whoFrom = net;
155 atomic_add_int(&net->ref_count, 1);
156 read_queue_e->data = dm;
157 read_queue_e->stcb = stcb;
158 read_queue_e->port_from = stcb->rport;
160 return (read_queue_e);
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
166 struct sctp_extrcvinfo *seinfo;
167 struct sctp_sndrcvinfo *outinfo;
168 struct sctp_rcvinfo *rcvinfo;
169 struct sctp_nxtinfo *nxtinfo;
176 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 /* user does not want any ancillary data */
183 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
186 seinfo = (struct sctp_extrcvinfo *)sinfo;
187 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
190 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
194 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
197 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
206 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 SCTP_BUF_LEN(ret) = 0;
213 /* We need a CMSG header followed by the struct */
214 cmh = mtod(ret, struct cmsghdr *);
216 * Make sure that there is no un-initialized padding between the
217 * cmsg header and cmsg data and after the cmsg data.
220 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 cmh->cmsg_level = IPPROTO_SCTP;
222 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 cmh->cmsg_type = SCTP_RCVINFO;
224 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 rcvinfo->rcv_context = sinfo->sinfo_context;
232 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
237 cmh->cmsg_level = IPPROTO_SCTP;
238 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 cmh->cmsg_type = SCTP_NXTINFO;
240 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 nxtinfo->nxt_flags = 0;
243 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 nxtinfo->nxt_flags |= SCTP_UNORDERED;
246 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 nxtinfo->nxt_flags |= SCTP_COMPLETE;
252 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
262 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 cmh->cmsg_type = SCTP_EXTRCV;
264 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 cmh->cmsg_type = SCTP_SNDRCV;
270 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
280 uint32_t gap, i, cumackp1;
282 int in_r = 0, in_nr = 0;
284 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 cumackp1 = asoc->cumulative_tsn + 1;
288 if (SCTP_TSN_GT(cumackp1, tsn)) {
290 * this tsn is behind the cum ack and thus we don't need to
291 * worry about it being moved from one to the other.
295 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 if ((in_r == 0) && (in_nr == 0)) {
300 panic("Things are really messed up now");
302 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 sctp_print_mapping_array(asoc);
307 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 asoc->highest_tsn_inside_nr_map = tsn;
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
324 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t bits, unordered;
338 bits = (control->sinfo_flags >> 8);
339 unordered = bits & SCTP_DATA_UNORDERED;
341 q = &strm->uno_inqueue;
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
345 * Only one stream can be here in old style
350 TAILQ_INSERT_TAIL(q, control, next_instrm);
351 control->on_strm_q = SCTP_ON_UNORDERED;
357 if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
360 if (TAILQ_EMPTY(q)) {
362 TAILQ_INSERT_HEAD(q, control, next_instrm);
364 control->on_strm_q = SCTP_ON_UNORDERED;
366 control->on_strm_q = SCTP_ON_ORDERED;
370 TAILQ_FOREACH(at, q, next_instrm) {
371 if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
373 * one in queue is bigger than the new one,
374 * insert before this one
376 TAILQ_INSERT_BEFORE(at, control, next_instrm);
378 control->on_strm_q = SCTP_ON_UNORDERED;
380 control->on_strm_q = SCTP_ON_ORDERED;
383 } else if (at->msg_id == control->msg_id) {
385 * Gak, He sent me a duplicate msg id
386 * number?? return -1 to abort.
390 if (TAILQ_NEXT(at, next_instrm) == NULL) {
392 * We are at the end, insert it
395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 sctp_log_strm_del(control, at,
397 SCTP_STR_LOG_FROM_INSERT_TL);
399 TAILQ_INSERT_AFTER(q,
400 at, control, next_instrm);
402 control->on_strm_q = SCTP_ON_UNORDERED;
404 control->on_strm_q = SCTP_ON_ORDERED;
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416 struct sctp_stream_in *strm,
417 struct sctp_queued_to_read *control,
418 struct sctp_tmit_chunk *chk,
419 int *abort_flag, int opspot)
421 char msg[SCTP_DIAG_INFO_LEN];
424 if (stcb->asoc.idata_supported) {
425 snprintf(msg, sizeof(msg),
426 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
428 control->fsn_included,
429 chk->rec.data.TSN_seq,
430 chk->rec.data.stream_number,
431 chk->rec.data.fsn_num, chk->rec.data.stream_seq);
433 snprintf(msg, sizeof(msg),
434 "Reass %x, CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x, SSN:%4.4x",
436 control->fsn_included,
437 chk->rec.data.TSN_seq,
438 chk->rec.data.stream_number,
439 chk->rec.data.fsn_num,
440 (uint16_t) chk->rec.data.stream_seq);
442 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
443 sctp_m_freem(chk->data);
445 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
446 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
447 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
452 clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
455 * The control could not be placed and must be cleaned.
457 struct sctp_tmit_chunk *chk, *nchk;
459 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
460 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
462 sctp_m_freem(chk->data);
464 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
466 sctp_free_a_readq(stcb, control);
470 * Queue the chunk either right into the socket buffer if it is the next one
471 * to go OR put it in the correct place in the delivery queue. If we do
472 * append to the so_buf, keep doing so until we are out of order as
473 * long as the control's entered are non-fragmented.
476 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
477 struct sctp_stream_in *strm,
478 struct sctp_association *asoc,
479 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
482 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
483 * all the data in one stream this could happen quite rapidly. One
484 * could use the TSN to keep track of things, but this scheme breaks
485 * down in the other type of stream useage that could occur. Send a
486 * single msg to stream 0, send 4Billion messages to stream 1, now
487 * send a message to stream 0. You have a situation where the TSN
488 * has wrapped but not in the stream. Is this worth worrying about
489 * or should we just change our queue sort at the bottom to be by
492 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
493 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
494 * assignment this could happen... and I don't see how this would be
495 * a violation. So for now I am undecided an will leave the sort by
496 * SSN alone. Maybe a hybred approach is the answer
499 struct sctp_queued_to_read *at;
503 char msg[SCTP_DIAG_INFO_LEN];
505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
506 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
508 if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
509 /* The incoming sseq is behind where we last delivered? */
510 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
511 control->sinfo_ssn, strm->last_sequence_delivered);
514 * throw it in the stream so it gets cleaned up in
515 * association destruction
517 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
518 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
519 strm->last_sequence_delivered, control->sinfo_tsn,
520 control->sinfo_stream, control->sinfo_ssn);
521 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
522 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
523 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
528 if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
532 asoc->size_on_all_streams += control->length;
533 sctp_ucount_incr(asoc->cnt_on_all_streams);
534 nxt_todel = strm->last_sequence_delivered + 1;
535 if (nxt_todel == control->sinfo_ssn) {
536 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
539 so = SCTP_INP_SO(stcb->sctp_ep);
540 atomic_add_int(&stcb->asoc.refcnt, 1);
541 SCTP_TCB_UNLOCK(stcb);
542 SCTP_SOCKET_LOCK(so, 1);
544 atomic_subtract_int(&stcb->asoc.refcnt, 1);
545 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
546 SCTP_SOCKET_UNLOCK(so, 1);
550 /* can be delivered right away? */
551 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
552 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
554 /* EY it wont be queued if it could be delivered directly */
556 asoc->size_on_all_streams -= control->length;
557 sctp_ucount_decr(asoc->cnt_on_all_streams);
558 strm->last_sequence_delivered++;
559 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
560 sctp_add_to_readq(stcb->sctp_ep, stcb,
562 &stcb->sctp_socket->so_rcv, 1,
563 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
564 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
566 nxt_todel = strm->last_sequence_delivered + 1;
567 if ((nxt_todel == control->sinfo_ssn) &&
568 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
569 asoc->size_on_all_streams -= control->length;
570 sctp_ucount_decr(asoc->cnt_on_all_streams);
571 if (control->on_strm_q == SCTP_ON_ORDERED) {
572 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
574 panic("Huh control:%p is on_strm_q:%d",
575 control, control->on_strm_q);
577 control->on_strm_q = 0;
578 strm->last_sequence_delivered++;
580 * We ignore the return of deliver_data here
581 * since we always can hold the chunk on the
582 * d-queue. And we have a finite number that
583 * can be delivered from the strq.
585 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
586 sctp_log_strm_del(control, NULL,
587 SCTP_STR_LOG_FROM_IMMED_DEL);
589 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
590 sctp_add_to_readq(stcb->sctp_ep, stcb,
592 &stcb->sctp_socket->so_rcv, 1,
593 SCTP_READ_LOCK_NOT_HELD,
596 } else if (nxt_todel == control->sinfo_ssn) {
601 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
602 SCTP_SOCKET_UNLOCK(so, 1);
607 * Ok, we did not deliver this guy, find the correct place
608 * to put it on the queue.
610 if (sctp_place_control_in_stream(strm, asoc, control)) {
611 char msg[SCTP_DIAG_INFO_LEN];
614 snprintf(msg, sizeof(msg),
615 "Queue to str mid:%d duplicate",
617 clean_up_control(stcb, control);
618 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
619 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
620 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
628 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
630 struct mbuf *m, *prev = NULL;
631 struct sctp_tcb *stcb;
633 stcb = control->stcb;
634 control->held_length = 0;
638 if (SCTP_BUF_LEN(m) == 0) {
639 /* Skip mbufs with NO length */
642 control->data = sctp_m_free(m);
645 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
646 m = SCTP_BUF_NEXT(prev);
649 control->tail_mbuf = prev;
654 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
655 if (control->on_read_q) {
657 * On read queue so we must increment the SB stuff,
658 * we assume caller has done any locks of SB.
660 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
662 m = SCTP_BUF_NEXT(m);
665 control->tail_mbuf = prev;
670 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
672 struct mbuf *prev = NULL;
673 struct sctp_tcb *stcb;
675 stcb = control->stcb;
677 panic("Control broken");
679 if (control->tail_mbuf == NULL) {
682 sctp_setup_tail_pointer(control);
685 control->tail_mbuf->m_next = m;
687 if (SCTP_BUF_LEN(m) == 0) {
688 /* Skip mbufs with NO length */
691 control->tail_mbuf->m_next = sctp_m_free(m);
692 m = control->tail_mbuf->m_next;
694 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
695 m = SCTP_BUF_NEXT(prev);
698 control->tail_mbuf = prev;
703 if (control->on_read_q) {
705 * On read queue so we must increment the SB stuff,
706 * we assume caller has done any locks of SB.
708 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
710 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
711 m = SCTP_BUF_NEXT(m);
714 control->tail_mbuf = prev;
719 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
721 memset(nc, 0, sizeof(struct sctp_queued_to_read));
722 nc->sinfo_stream = control->sinfo_stream;
723 nc->sinfo_ssn = control->sinfo_ssn;
724 TAILQ_INIT(&nc->reasm);
725 nc->top_fsn = control->top_fsn;
726 nc->msg_id = control->msg_id;
727 nc->sinfo_flags = control->sinfo_flags;
728 nc->sinfo_ppid = control->sinfo_ppid;
729 nc->sinfo_context = control->sinfo_context;
730 nc->fsn_included = 0xffffffff;
731 nc->sinfo_tsn = control->sinfo_tsn;
732 nc->sinfo_cumtsn = control->sinfo_cumtsn;
733 nc->sinfo_assoc_id = control->sinfo_assoc_id;
734 nc->whoFrom = control->whoFrom;
735 atomic_add_int(&nc->whoFrom->ref_count, 1);
736 nc->stcb = control->stcb;
737 nc->port_from = control->port_from;
741 sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
742 struct sctp_queued_to_read *control, uint32_t pd_point)
745 * Special handling for the old un-ordered data chunk. All the
746 * chunks/TSN's go to msg_id 0. So we have to do the old style
747 * watching to see if we have it all. If you return one, no other
748 * control entries on the un-ordered queue will be looked at. In
749 * theory there should be no others entries in reality, unless the
750 * guy is sending both unordered NDATA and unordered DATA...
752 struct sctp_tmit_chunk *chk, *lchk, *tchk;
754 struct sctp_queued_to_read *nc = NULL;
757 if (control->first_frag_seen == 0) {
758 /* Nothing we can do, we have not seen the first piece yet */
761 /* Collapse any we can */
764 fsn = control->fsn_included + 1;
765 /* Now what can we add? */
766 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
767 if (chk->rec.data.fsn_num == fsn) {
769 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
770 sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
774 if (control->end_added) {
776 if (!TAILQ_EMPTY(&control->reasm)) {
778 * Ok we have to move anything left
779 * on the control queue to a new
782 sctp_alloc_a_readq(stcb, nc);
783 sctp_build_readq_entry_from_ctl(nc, control);
784 tchk = TAILQ_FIRST(&control->reasm);
785 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
786 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
787 nc->first_frag_seen = 1;
788 nc->fsn_included = tchk->rec.data.fsn_num;
789 nc->data = tchk->data;
790 sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
792 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
793 sctp_setup_tail_pointer(nc);
794 tchk = TAILQ_FIRST(&control->reasm);
796 /* Spin the rest onto the queue */
798 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
799 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
800 tchk = TAILQ_FIRST(&control->reasm);
803 * Now lets add it to the queue
804 * after removing control
806 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
807 nc->on_strm_q = SCTP_ON_UNORDERED;
808 if (control->on_strm_q) {
809 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
810 control->on_strm_q = 0;
813 if (control->on_read_q == 0) {
814 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
815 &stcb->sctp_socket->so_rcv, control->end_added,
816 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
818 if (control->pdapi_started) {
819 strm->pd_api_started = 0;
820 control->pdapi_started = 0;
822 if (control->on_strm_q) {
823 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
824 control->on_strm_q = 0;
826 sctp_wakeup_the_read_socket(stcb->sctp_ep);
827 if ((nc) && (nc->first_frag_seen)) {
829 * Switch to the new guy and
843 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
844 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
845 &stcb->sctp_socket->so_rcv, control->end_added,
846 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
847 strm->pd_api_started = 1;
848 control->pdapi_started = 1;
849 sctp_wakeup_the_read_socket(stcb->sctp_ep);
857 sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
858 struct sctp_stream_in *strm,
859 struct sctp_queued_to_read *control,
860 struct sctp_tmit_chunk *chk,
863 struct sctp_tmit_chunk *at;
867 * Here we need to place the chunk into the control structure sorted
868 * in the correct order.
870 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
871 /* Its the very first one. */
872 SCTPDBG(SCTP_DEBUG_XXX,
873 "chunk is a first fsn:%d becomes fsn_included\n",
874 chk->rec.data.fsn_num);
875 if (control->first_frag_seen) {
877 * In old un-ordered we can reassembly on one
878 * control multiple messages. As long as the next
879 * FIRST is greater then the old first (TSN i.e. FSN
885 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
887 * Easy way the start of a new guy beyond
892 if ((chk->rec.data.fsn_num == control->fsn_included) ||
893 (control->pdapi_started)) {
895 * Ok this should not happen, if it does we
896 * started the pd-api on the higher TSN
897 * (since the equals part is a TSN failure
900 * We are completly hosed in that case since I
901 * have no way to recover. This really will
902 * only happen if we can get more TSN's
903 * higher before the pd-api-point.
905 sctp_abort_in_reasm(stcb, strm, control, chk,
907 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
912 * Ok we have two firsts and the one we just got is
913 * smaller than the one we previously placed.. yuck!
914 * We must swap them out.
917 tdata = control->data;
918 control->data = chk->data;
920 /* Swap the lengths */
921 tmp = control->length;
922 control->length = chk->send_size;
923 chk->send_size = tmp;
924 /* Fix the FSN included */
925 tmp = control->fsn_included;
926 control->fsn_included = chk->rec.data.fsn_num;
927 chk->rec.data.fsn_num = tmp;
930 control->first_frag_seen = 1;
931 control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
932 control->data = chk->data;
933 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
935 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
936 sctp_setup_tail_pointer(control);
940 if (TAILQ_EMPTY(&control->reasm)) {
941 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
942 asoc->size_on_reasm_queue += chk->send_size;
943 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
946 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
947 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
949 * This one in queue is bigger than the new one,
950 * insert the new one before at.
952 asoc->size_on_reasm_queue += chk->send_size;
953 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
955 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
957 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
959 * They sent a duplicate fsn number. This really
960 * should not happen since the FSN is a TSN and it
961 * should have been dropped earlier.
964 sctp_m_freem(chk->data);
967 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
968 sctp_abort_in_reasm(stcb, strm, control, chk,
970 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
976 asoc->size_on_reasm_queue += chk->send_size;
977 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
978 control->top_fsn = chk->rec.data.fsn_num;
979 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
984 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
987 * Given a stream, strm, see if any of the SSN's on it that are
988 * fragmented are ready to deliver. If so go ahead and place them on
989 * the read queue. In so placing if we have hit the end, then we
990 * need to remove them from the stream's queue.
992 struct sctp_queued_to_read *control, *nctl = NULL;
993 uint32_t next_to_del;
997 if (stcb->sctp_socket) {
998 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
999 stcb->sctp_ep->partial_delivery_point);
1001 pd_point = stcb->sctp_ep->partial_delivery_point;
1003 control = TAILQ_FIRST(&strm->uno_inqueue);
1005 (asoc->idata_supported == 0)) {
1006 /* Special handling needed for "old" data format */
1007 nctl = TAILQ_NEXT(control, next_instrm);
1008 if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
1012 if (strm->pd_api_started) {
1013 /* Can't add more */
1017 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control:%p e(%d) ssn:%d top_fsn:%d inc_fsn:%d -uo\n",
1018 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1019 nctl = TAILQ_NEXT(control, next_instrm);
1020 if (control->end_added) {
1021 /* We just put the last bit on */
1022 if (control->on_strm_q) {
1023 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1024 panic("Huh control:%p on_q:%d -- not unordered?",
1025 control, control->on_strm_q);
1027 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1028 control->on_strm_q = 0;
1030 if (control->on_read_q == 0) {
1031 sctp_add_to_readq(stcb->sctp_ep, stcb,
1033 &stcb->sctp_socket->so_rcv, control->end_added,
1034 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1037 /* Can we do a PD-API for this un-ordered guy? */
1038 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1039 strm->pd_api_started = 1;
1040 control->pdapi_started = 1;
1041 sctp_add_to_readq(stcb->sctp_ep, stcb,
1043 &stcb->sctp_socket->so_rcv, control->end_added,
1044 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1052 control = TAILQ_FIRST(&strm->inqueue);
1053 if (strm->pd_api_started) {
1054 /* Can't add more */
1057 if (control == NULL) {
1060 if (strm->last_sequence_delivered == control->sinfo_ssn) {
1062 * Ok the guy at the top was being partially delivered
1063 * completed, so we remove it. Note the pd_api flag was
1064 * taken off when the chunk was merged on in
1065 * sctp_queue_data_for_reasm below.
1067 nctl = TAILQ_NEXT(control, next_instrm);
1068 SCTPDBG(SCTP_DEBUG_XXX,
1069 "Looking at control:%p e(%d) ssn:%d top_fsn:%d inc_fsn:%d (lastdel:%d)- o\n",
1070 control, control->end_added, control->sinfo_ssn,
1071 control->top_fsn, control->fsn_included,
1072 strm->last_sequence_delivered);
1073 if (control->end_added) {
1074 if (control->on_strm_q) {
1075 if (control->on_strm_q != SCTP_ON_ORDERED) {
1076 panic("Huh control:%p on_q:%d -- not ordered?",
1077 control, control->on_strm_q);
1079 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1080 control->on_strm_q = 0;
1082 if (control->on_read_q == 0) {
1083 sctp_add_to_readq(stcb->sctp_ep, stcb,
1085 &stcb->sctp_socket->so_rcv, control->end_added,
1086 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1088 if (strm->pd_api_started && control->pdapi_started) {
1089 control->pdapi_started = 0;
1090 strm->pd_api_started = 0;
1095 if (strm->pd_api_started) {
1097 * Can't add more must have gotten an un-ordered above being
1098 * partially delivered.
1103 next_to_del = strm->last_sequence_delivered + 1;
1105 SCTPDBG(SCTP_DEBUG_XXX,
1106 "Looking at control:%p e(%d) ssn:%d top_fsn:%d inc_fsn:%d (nxtdel:%d)- o\n",
1107 control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1109 nctl = TAILQ_NEXT(control, next_instrm);
1110 if ((control->sinfo_ssn == next_to_del) &&
1111 (control->first_frag_seen)) {
1112 /* Ok we can deliver it onto the stream. */
1113 if (control->end_added) {
1114 /* We are done with it afterwards */
1115 if (control->on_strm_q) {
1116 if (control->on_strm_q != SCTP_ON_ORDERED) {
1117 panic("Huh control:%p on_q:%d -- not ordered?",
1118 control, control->on_strm_q);
1120 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1121 control->on_strm_q = 0;
1125 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1127 * A singleton now slipping through - mark
1128 * it non-revokable too
1130 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1131 } else if (control->end_added == 0) {
1133 * Check if we can defer adding until its
1136 if ((control->length < pd_point) || (strm->pd_api_started)) {
1138 * Don't need it or cannot add more
1139 * (one being delivered that way)
1144 if (control->on_read_q == 0) {
1145 sctp_add_to_readq(stcb->sctp_ep, stcb,
1147 &stcb->sctp_socket->so_rcv, control->end_added,
1148 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1150 strm->last_sequence_delivered = next_to_del;
1151 if ((control->end_added) && (control->last_frag_seen)) {
1155 /* We are now doing PD API */
1156 strm->pd_api_started = 1;
1157 control->pdapi_started = 1;
/*
 * NOTE(review): this is a numbered excerpt; gaps in the embedded numbering
 * (e.g. 1169 -> 1172) show that lines, including braces and #ifdef guards,
 * are elided from this view.  Comments below describe only what is visible.
 *
 * Merge a reassembly chunk's data onto its control structure and release
 * the chunk.  If the control is already visible to the reader (on_read_q),
 * the inp read lock is taken around the merge so the partial-delivery
 * reader cannot race with us.
 */
1166 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1167 struct sctp_stream_in *strm,
1168 struct sctp_tcb *stcb, struct sctp_association *asoc,
1169 struct sctp_tmit_chunk *chk)
1172 * Given a control and a chunk, merge the data from the chk onto the
1173 * control and free up the chunk resources.
1177 if (control->on_read_q) {
1179 * Its being pd-api'd so we must do some locks.
1181 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First fragment merged: the chunk's mbuf chain becomes the control's data. */
1184 if (control->data == NULL) {
1185 control->data = chk->data;
1186 sctp_setup_tail_pointer(control);
1188 sctp_add_to_tail_pointer(control, chk->data);
/* Track the highest in-sequence FSN now contained in the control and
 * move accounting off the reassembly queue. */
1190 control->fsn_included = chk->rec.data.fsn_num;
1191 asoc->size_on_reasm_queue -= chk->send_size;
1192 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1193 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1195 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1196 control->first_frag_seen = 1;
/* Last fragment: the message is complete.  If it is both on a stream
 * queue and the read queue, end any partial-delivery state and unhook
 * it from whichever stream queue (ordered/unordered) it sits on. */
1198 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1200 if ((control->on_strm_q) && (control->on_read_q)) {
1201 if (control->pdapi_started) {
1202 control->pdapi_started = 0;
1203 strm->pd_api_started = 0;
1205 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1207 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1208 control->on_strm_q = 0;
1209 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1211 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1212 control->on_strm_q = 0;
1213 } else if (control->on_strm_q) {
1214 panic("Unknown state on ctrl:%p on_strm_q:%d", control,
1215 control->on_strm_q);
1218 control->end_added = 1;
1219 control->last_frag_seen = 1;
1222 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
/* The chunk's data now belongs to the control; free the chunk shell. */
1224 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1228 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1229 * queue, see if anything can be delivered. If so pull it off (or as much as
1230 * we can). If we run out of space then we must dump what we can and set the
1231 * appropriate flag to say we queued what we could.
/*
 * NOTE(review): numbered excerpt with elided lines (gaps in the embedded
 * numbering); comments describe only the visible statements.
 *
 * Place an incoming fragment (chk) into the reassembly state for its
 * message (control): either append/prepend it directly onto the control
 * when it is the next in-sequence FSN, or insert it FSN-ordered into
 * control->reasm.  Afterwards, suck any now-contiguous fragments from
 * the reasm list into the control and, when complete (or past the
 * partial-delivery point), hand the control to the read queue.
 */
1234 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1235 struct sctp_stream_in *strm,
1236 struct sctp_queued_to_read *control,
1237 struct sctp_tmit_chunk *chk,
1238 int created_control,
1239 int *abort_flag, uint32_t tsn)
1242 struct sctp_tmit_chunk *at, *nat;
1243 int cnt_added, unordered;
1246 * For old un-ordered data chunks.
1248 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1253 /* Must be added to the stream-in queue */
1254 if (created_control) {
1255 if (sctp_place_control_in_stream(strm, asoc, control)) {
1256 /* Duplicate SSN? */
1257 clean_up_control(stcb, control)
1258 sctp_abort_in_reasm(stcb, strm, control, chk,
1260 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
/* For old DATA: a fragment at cum-ack+1 with no B bit can never be
 * completed -- the start of the message is already unrecoverable. */
1263 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1265 * Ok we created this control and now lets validate
1266 * that its legal i.e. there is a B bit set, if not
1267 * and we have up to the cum-ack then its invalid.
1269 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1270 sctp_abort_in_reasm(stcb, strm, control, chk,
1272 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old (non-I-DATA) unordered fragments take a separate injection path. */
1277 if ((asoc->idata_supported == 0) && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1278 sctp_inject_old_data_unordered(stcb, asoc, strm, control, chk, abort_flag);
1282 * Ok we must queue the chunk into the reassembly portion: o if its
1283 * the first it goes to the control mbuf. o if its not first but the
1284 * next in sequence it goes to the control, and each succeeding one
1285 * in order also goes. o if its not in order we place it on the list
1288 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1289 /* Its the very first one. */
1290 SCTPDBG(SCTP_DEBUG_XXX,
1291 "chunk is a first fsn:%d becomes fsn_included\n",
1292 chk->rec.data.fsn_num);
1293 if (control->first_frag_seen) {
1295 * Error on senders part, they either sent us two
1296 * data chunks with FIRST, or they sent two
1297 * un-ordered chunks that were fragmented at the
1298 * same time in the same stream.
1300 sctp_abort_in_reasm(stcb, strm, control, chk,
1302 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
/* First fragment: its data seeds the control directly. */
1305 control->first_frag_seen = 1;
1306 control->fsn_included = chk->rec.data.fsn_num;
1307 control->data = chk->data;
1308 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1310 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1311 sctp_setup_tail_pointer(control);
1313 /* Place the chunk in our list */
/* Middle/last fragment: validate FSN against what we have already
 * delivered (fsn_included) and, once the E bit has been seen, against
 * top_fsn, aborting the association on impossible sequences. */
1316 if (control->last_frag_seen == 0) {
1317 /* Still willing to raise highest FSN seen */
1318 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1319 SCTPDBG(SCTP_DEBUG_XXX,
1320 "We have a new top_fsn:%d\n",
1321 chk->rec.data.fsn_num);
1322 control->top_fsn = chk->rec.data.fsn_num;
1324 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1325 SCTPDBG(SCTP_DEBUG_XXX,
1326 "The last fsn is now in place fsn:%d\n",
1327 chk->rec.data.fsn_num);
1328 control->last_frag_seen = 1;
1330 if (asoc->idata_supported || control->first_frag_seen) {
1332 * For IDATA we always check since we know
1333 * that the first fragment is 0. For old
1334 * DATA we have to receive the first before
1335 * we know the first FSN (which is the TSN).
1337 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1339 * We have already delivered up to
1342 sctp_abort_in_reasm(stcb, strm, control, chk,
1344 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1349 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1350 /* Second last? huh? */
1351 SCTPDBG(SCTP_DEBUG_XXX,
1352 "Duplicate last fsn:%d (top:%d) -- abort\n",
1353 chk->rec.data.fsn_num, control->top_fsn);
1354 sctp_abort_in_reasm(stcb, strm, control,
1356 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1359 if (asoc->idata_supported || control->first_frag_seen) {
1361 * For IDATA we always check since we know
1362 * that the first fragment is 0. For old
1363 * DATA we have to receive the first before
1364 * we know the first FSN (which is the TSN).
1367 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1369 * We have already delivered up to
1372 SCTPDBG(SCTP_DEBUG_XXX,
1373 "New fsn:%d is already seen in included_fsn:%d -- abort\n",
1374 chk->rec.data.fsn_num, control->fsn_included);
1375 sctp_abort_in_reasm(stcb, strm, control, chk,
1377 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1382 * validate not beyond top FSN if we have seen last
1385 if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1386 SCTPDBG(SCTP_DEBUG_XXX,
1387 "New fsn:%d is beyond or at top_fsn:%d -- abort\n",
1388 chk->rec.data.fsn_num,
1390 sctp_abort_in_reasm(stcb, strm, control, chk,
1392 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1397 * If we reach here, we need to place the new chunk in the
1398 * reassembly for this control.
1400 SCTPDBG(SCTP_DEBUG_XXX,
1401 "chunk is a not first fsn:%d needs to be inserted\n",
1402 chk->rec.data.fsn_num);
/* Keep control->reasm sorted by FSN; duplicates abort the assoc. */
1403 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1404 if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1406 * This one in queue is bigger than the new
1407 * one, insert the new one before at.
1409 SCTPDBG(SCTP_DEBUG_XXX,
1410 "Insert it before fsn:%d\n",
1411 at->rec.data.fsn_num);
1412 asoc->size_on_reasm_queue += chk->send_size;
1413 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1414 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1417 } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1419 * Gak, He sent me a duplicate str seq
1423 * foo bar, I guess I will just free this
1424 * new guy, should we abort too? FIX ME
1425 * MAYBE? Or it COULD be that the SSN's have
1426 * wrapped. Maybe I should compare to TSN
1427 * somehow... sigh for now just blow away
1430 SCTPDBG(SCTP_DEBUG_XXX,
1431 "Duplicate to fsn:%d -- abort\n",
1432 at->rec.data.fsn_num);
1433 sctp_abort_in_reasm(stcb, strm, control,
1435 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1439 if (inserted == 0) {
1440 /* Goes on the end */
1441 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn:%d\n",
1442 chk->rec.data.fsn_num);
1443 asoc->size_on_reasm_queue += chk->send_size;
1444 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1445 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1449 * Ok lets see if we can suck any up into the control structure that
1450 * are in seq if it makes sense.
1454 * If the first fragment has not been seen there is no sense in
/* Drain phase: pull every now-contiguous fragment (fsn_included+1, +2, ...)
 * off the reasm list into the control via sctp_add_chk_to_control(). */
1457 if (control->first_frag_seen) {
1458 next_fsn = control->fsn_included + 1;
1459 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1460 if (at->rec.data.fsn_num == next_fsn) {
1461 /* We can add this one now to the control */
1462 SCTPDBG(SCTP_DEBUG_XXX,
1463 "Adding more to control:%p at:%p fsn:%d next_fsn:%d included:%d\n",
1465 at->rec.data.fsn_num,
1466 next_fsn, control->fsn_included);
1467 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1468 sctp_add_chk_to_control(control, strm, stcb, asoc, at);
1471 if (control->end_added && control->pdapi_started) {
1472 if (strm->pd_api_started) {
1473 strm->pd_api_started = 0;
1474 control->pdapi_started = 0;
1476 if (control->on_read_q == 0) {
1477 sctp_add_to_readq(stcb->sctp_ep, stcb,
1479 &stcb->sctp_socket->so_rcv, control->end_added,
1480 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* If a reader is already holding this control, wake it so it sees
 * the fragments we just appended. */
1489 if ((control->on_read_q) && (cnt_added > 0)) {
1490 /* Need to wakeup the reader */
1491 sctp_wakeup_the_read_socket(stcb->sctp_ep);
/*
 * NOTE(review): numbered excerpt with elided lines; the branch structure
 * selecting between the ordered and unordered searches is partially hidden.
 *
 * Look up an in-progress reassembly control for msg_id on this stream:
 * the ordered inqueue is scanned by msg_id; for the unordered case the
 * uno_inqueue is consulted (TAILQ_FIRST for what appears to be the
 * old-data path -- confirm against the elided lines -- otherwise a full
 * scan by msg_id).  Returns the matching control, presumably NULL when
 * none is found (return statements are elided from this view).
 */
1495 static struct sctp_queued_to_read *
1496 find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1498 struct sctp_queued_to_read *reasm;
1501 TAILQ_FOREACH(reasm, &strm->inqueue, next_instrm) {
1502 if (reasm->msg_id == msg_id) {
1508 reasm = TAILQ_FIRST(&strm->uno_inqueue);
1511 TAILQ_FOREACH(reasm, &strm->uno_inqueue, next_instrm) {
1512 if (reasm->msg_id == msg_id) {
/*
 * NOTE(review): numbered excerpt with elided lines (gaps in the embedded
 * numbering hide braces, #ifdefs and some statements); comments describe
 * only the visible code.
 *
 * Main per-chunk receive path: validate one DATA or I-DATA chunk,
 * record its TSN in the mapping arrays, and either express-deliver it
 * straight to the socket buffer, queue it on the stream for ordering,
 * or hand it to sctp_queue_data_for_reasm() when fragmented.  Sets
 * *abort_flag when the association is torn down.
 */
1522 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1523 struct mbuf **m, int offset, int chk_length,
1524 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1525 int *break_flag, int last_chunk, uint8_t chtype)
1527 /* Process a data chunk */
1528 /* struct sctp_tmit_chunk *chk; */
1529 struct sctp_data_chunk *ch;
1530 struct sctp_idata_chunk *nch, chunk_buf;
1531 struct sctp_tmit_chunk *chk;
1532 uint32_t tsn, fsn, gap, msg_id;
1535 int need_reasm_check = 0;
1537 struct mbuf *op_err;
1538 char msg[SCTP_DIAG_INFO_LEN];
1539 struct sctp_queued_to_read *control = NULL;
1540 uint32_t protocol_id;
1541 uint8_t chunk_flags;
1542 struct sctp_stream_reset_list *liste;
1543 struct sctp_stream_in *strm;
1546 int created_control = 0;
/* Parse the header: I-DATA carries msg_id and fsn explicitly; for old
 * DATA the 16-bit SSN serves as msg_id and the FSN comes from the TSN
 * (assignment elided from this view). */
1550 if (chtype == SCTP_IDATA) {
1551 nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1552 sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1554 ch = (struct sctp_data_chunk *)nch;
1555 clen = sizeof(struct sctp_idata_chunk);
1556 tsn = ntohl(ch->dp.tsn);
1557 msg_id = ntohl(nch->dp.msg_id);
1558 if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1561 fsn = ntohl(nch->dp.fsn);
1564 ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1565 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1567 tsn = ntohl(ch->dp.tsn);
1568 clen = sizeof(struct sctp_data_chunk);
1570 msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1574 chunk_flags = ch->ch.chunk_flags;
/* A data chunk with no user payload is a protocol violation: abort. */
1575 if ((size_t)chk_length == clen) {
1577 * Need to send an abort since we had a empty data chunk.
1579 struct mbuf *op_err;
1581 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1582 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1583 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1587 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1588 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1589 asoc->send_sack = 1;
1591 protocol_id = ch->dp.protocol_id;
/* NOTE(review): 'ordered' is recomputed here identically to line 1587
 * above -- looks redundant, confirm against the elided lines. */
1592 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1593 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1594 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1599 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* Duplicate detection, pass 1: at or below the cumulative ack. */
1600 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1601 /* It is a duplicate */
1602 SCTP_STAT_INCR(sctps_recvdupdata);
1603 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1604 /* Record a dup for the next outbound sack */
1605 asoc->dup_tsns[asoc->numduptsns] = tsn;
1608 asoc->send_sack = 1;
1611 /* Calculate the number of TSN's between the base and this TSN */
1612 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1613 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1614 /* Can't hold the bit in the mapping at max array, toss it */
1617 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1618 SCTP_TCB_LOCK_ASSERT(stcb);
1619 if (sctp_expand_mapping_array(asoc, gap)) {
1620 /* Can't expand, drop it */
1624 if (SCTP_TSN_GT(tsn, *high_tsn)) {
/* Duplicate detection, pass 2: already marked in either mapping array. */
1627 /* See if we have received this one already */
1628 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1629 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1630 SCTP_STAT_INCR(sctps_recvdupdata);
1631 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1632 /* Record a dup for the next outbound sack */
1633 asoc->dup_tsns[asoc->numduptsns] = tsn;
1636 asoc->send_sack = 1;
1640 * Check to see about the GONE flag, duplicates would cause a sack
1641 * to be sent up above
1643 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1644 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1645 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1647 * wait a minute, this guy is gone, there is no longer a
1648 * receiver. Send peer an ABORT!
1650 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1651 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1656 * Now before going further we see if there is room. If NOT then we
1657 * MAY let one through only IF this TSN is the one we are waiting
1658 * for on a partial delivery API.
1661 /* Is the stream valid? */
1662 strmno = ntohs(ch->dp.stream_id);
/* Invalid stream id: queue an operational error, but still record the
 * TSN as received (nr-marked) so the peer does not retransmit it. */
1664 if (strmno >= asoc->streamincnt) {
1665 struct sctp_error_invalid_stream *cause;
1667 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1668 0, M_NOWAIT, 1, MT_DATA);
1669 if (op_err != NULL) {
1670 /* add some space up front so prepend will work well */
1671 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1672 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1674 * Error causes are just param's and this one has
1675 * two back to back phdr, one with the error type
1676 * and size, the other with the streamid and a rsvd
1678 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1679 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1680 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1681 cause->stream_id = ch->dp.stream_id;
1682 cause->reserved = htons(0);
1683 sctp_queue_op_err(stcb, op_err);
1685 SCTP_STAT_INCR(sctps_badsid);
1686 SCTP_TCB_LOCK_ASSERT(stcb);
1687 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1688 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1689 asoc->highest_tsn_inside_nr_map = tsn;
1691 if (tsn == (asoc->cumulative_tsn + 1)) {
1692 /* Update cum-ack */
1693 asoc->cumulative_tsn = tsn;
1697 strm = &asoc->strmin[strmno];
1699 * If its a fragmented message, lets see if we can find the control
1700 * on the reassembly queues.
1702 if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1704 * The first *must* be fsn 0, and other (middle/end) pieces
1705 * can *not* be fsn 0.
/* Fragmented chunk: find (or validate against) the existing
 * reassembly control for this msg_id/ordering. */
1709 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1710 /* See if we can find the re-assembly entity */
1711 control = find_reasm_entry(strm, msg_id, ordered, old_data);
1712 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1713 chunk_flags, control);
1715 /* We found something, does it belong? */
1716 if (ordered && (msg_id != control->sinfo_ssn)) {
1718 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1719 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1720 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1724 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1726 * We can't have a switched order with an
1731 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1733 * We can't have a switched unordered with a
1741 * Its a complete segment. Lets validate we don't have a
1742 * re-assembly going on with the same Stream/Seq (for
1743 * ordered) or in the same Stream for unordered.
1745 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1747 if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
1748 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x dup detected on msg_id:%d\n",
1755 /* now do the tests */
/* Resource limits: too many queued chunks or a closed rwnd.  If the
 * reader has data pending, kick it awake before (presumably) dropping. */
1756 if (((asoc->cnt_on_all_streams +
1757 asoc->cnt_on_reasm_queue +
1758 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1759 (((int)asoc->my_rwnd) <= 0)) {
1761 * When we have NO room in the rwnd we check to make sure
1762 * the reader is doing its job...
1764 if (stcb->sctp_socket->so_rcv.sb_cc) {
1765 /* some to read, wake-up */
1766 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1769 so = SCTP_INP_SO(stcb->sctp_ep);
1770 atomic_add_int(&stcb->asoc.refcnt, 1);
1771 SCTP_TCB_UNLOCK(stcb);
1772 SCTP_SOCKET_LOCK(so, 1);
1773 SCTP_TCB_LOCK(stcb);
1774 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1775 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1776 /* assoc was freed while we were unlocked */
1777 SCTP_SOCKET_UNLOCK(so, 1);
1781 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1782 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1783 SCTP_SOCKET_UNLOCK(so, 1);
1786 /* now is it in the mapping array of what we have accepted? */
1788 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1789 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1790 /* Nope not in the valid range dump it */
1792 sctp_set_rwnd(stcb, asoc);
1793 if ((asoc->cnt_on_all_streams +
1794 asoc->cnt_on_reasm_queue +
1795 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1796 SCTP_STAT_INCR(sctps_datadropchklmt);
1798 SCTP_STAT_INCR(sctps_datadroprwnd);
1804 if (control == NULL) {
1807 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1812 #ifdef SCTP_ASOCLOG_OF_TSNS
1813 SCTP_TCB_LOCK_ASSERT(stcb);
1814 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1815 asoc->tsn_in_at = 0;
1816 asoc->tsn_in_wrapped = 1;
1818 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1819 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1820 asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1821 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1822 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1823 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1824 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1825 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1829 * Before we continue lets validate that we are not being fooled by
1830 * an evil attacker. We can only have Nk chunks based on our TSN
1831 * spread allowed by the mapping array N * 8 bits, so there is no
1832 * way our stream sequence numbers could have wrapped. We of course
1833 * only validate the FIRST fragment so the bit must be set.
1835 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1836 (TAILQ_EMPTY(&asoc->resetHead)) &&
1837 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1838 SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1839 /* The incoming sseq is behind where we last delivered? */
1840 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1841 msg_id, asoc->strmin[strmno].last_sequence_delivered);
1843 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1844 asoc->strmin[strmno].last_sequence_delivered,
1845 tsn, strmno, msg_id);
1846 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1847 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1848 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1852 /************************************
1853 * From here down we may find ch-> invalid
1854 * so its a good idea NOT to use it.
1855 *************************************/
/* Detach the user payload: copy it out unless this is the last chunk
 * of the packet, in which case we can steal the mbuf chain and trim it. */
1857 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1859 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1861 if (last_chunk == 0) {
1863 dmbuf = SCTP_M_COPYM(*m,
1864 (offset + sizeof(struct sctp_idata_chunk)),
1867 dmbuf = SCTP_M_COPYM(*m,
1868 (offset + sizeof(struct sctp_data_chunk)),
1871 #ifdef SCTP_MBUF_LOGGING
1872 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1873 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1877 /* We can steal the last chunk */
1881 /* lop off the top part */
1883 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1885 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1887 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1888 l_len = SCTP_BUF_LEN(dmbuf);
1891 * need to count up the size hopefully does not hit
1897 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1898 l_len += SCTP_BUF_LEN(lat);
1901 if (l_len > the_len) {
1902 /* Trim the end round bytes off too */
1903 m_adj(dmbuf, -(l_len - the_len));
1906 if (dmbuf == NULL) {
1907 SCTP_STAT_INCR(sctps_nomem);
1911 * Now no matter what we need a control, get one if we don't have
1912 * one (we may have gotten it above when we found the message was
1915 if (control == NULL) {
1916 sctp_alloc_a_readq(stcb, control);
1917 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1922 if (control == NULL) {
1923 SCTP_STAT_INCR(sctps_nomem);
1926 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1927 control->data = dmbuf;
1928 control->tail_mbuf = NULL;
1929 control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1930 control->top_fsn = control->fsn_included = fsn;
1932 created_control = 1;
1934 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x ordered:%d msgid:%d control:%p\n",
1935 chunk_flags, ordered, msg_id, control);
/* Express delivery fast path: complete message, no stream reset
 * pending, and either unordered or exactly next in order with an
 * empty stream queue -- deliver straight to the socket buffer. */
1936 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1937 TAILQ_EMPTY(&asoc->resetHead) &&
1939 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1940 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1941 /* Candidate for express delivery */
1943 * Its not fragmented, No PD-API is up, Nothing in the
1944 * delivery queue, Its un-ordered OR ordered and the next to
1945 * deliver AND nothing else is stuck on the stream queue,
1946 * And there is room for it in the socket buffer. Lets just
1947 * stuff it up the buffer....
1949 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1950 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1951 asoc->highest_tsn_inside_nr_map = tsn;
1953 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control:%p to be read (mid:%d)\n",
1956 sctp_add_to_readq(stcb->sctp_ep, stcb,
1957 control, &stcb->sctp_socket->so_rcv,
1958 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1960 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1961 /* for ordered, bump what we delivered */
1962 strm->last_sequence_delivered++;
1964 SCTP_STAT_INCR(sctps_recvexpress);
1965 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1966 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
1967 SCTP_STR_LOG_FROM_EXPRS_DEL);
1970 goto finish_express_del;
1972 /* Now will we need a chunk too? */
1973 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1974 sctp_alloc_a_chunk(stcb, chk);
1976 /* No memory so we drop the chunk */
1977 SCTP_STAT_INCR(sctps_nomem);
1978 if (last_chunk == 0) {
1979 /* we copied it, free the copy */
1980 sctp_m_freem(dmbuf);
1984 chk->rec.data.TSN_seq = tsn;
1985 chk->no_fr_allowed = 0;
1986 chk->rec.data.fsn_num = fsn;
1987 chk->rec.data.stream_seq = msg_id;
1988 chk->rec.data.stream_number = strmno;
1989 chk->rec.data.payloadtype = protocol_id;
1990 chk->rec.data.context = stcb->asoc.context;
1991 chk->rec.data.doing_fast_retransmit = 0;
1992 chk->rec.data.rcv_flags = chunk_flags;
1994 chk->send_size = the_len;
1996 SCTPDBG(SCTP_DEBUG_XXX, "Building ck:%p for control:%p to be read (mid:%d)\n",
1999 atomic_add_int(&net->ref_count, 1);
2002 /* Set the appropriate TSN mark */
2003 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2004 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2005 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2006 asoc->highest_tsn_inside_nr_map = tsn;
2009 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2010 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2011 asoc->highest_tsn_inside_map = tsn;
2014 /* Now is it complete (i.e. not fragmented)? */
2015 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2017 * Special check for when streams are resetting. We could be
2018 * more smart about this and check the actual stream to see
2019 * if it is not being reset.. that way we would not create a
2020 * HOLB when amongst streams being reset and those not being
/* A pending stream reset holds back TSNs past liste->tsn: park the
 * control TSN-ordered on pending_reply_queue instead of delivering. */
2024 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2025 SCTP_TSN_GT(tsn, liste->tsn)) {
2027 * yep its past where we need to reset... go ahead
2030 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2032 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2034 struct sctp_queued_to_read *ctlOn, *nctlOn;
2035 unsigned char inserted = 0;
2037 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2038 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2043 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2048 if (inserted == 0) {
2050 * must be put at end, use prevP
2051 * (all setup from loop) to setup
2054 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2057 goto finish_express_del;
2059 if (chunk_flags & SCTP_DATA_UNORDERED) {
2060 /* queue directly into socket buffer */
2061 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control:%p msg_id:%d\n",
2063 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2064 sctp_add_to_readq(stcb->sctp_ep, stcb,
2066 &stcb->sctp_socket->so_rcv, 1,
2067 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2070 SCTPDBG(SCTP_DEBUG_XXX, "Queue control:%p for reordering msg_id:%d\n", control,
2072 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2080 goto finish_express_del;
2082 /* If we reach here its a reassembly */
2083 need_reasm_check = 1;
2084 SCTPDBG(SCTP_DEBUG_XXX,
2085 "Queue data to stream for reasm control:%p msg_id:%d\n",
2087 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2090 * the assoc is now gone and chk was put onto the reasm
2091 * queue, which has all been freed.
2099 /* Here we tidy up things */
2100 if (tsn == (asoc->cumulative_tsn + 1)) {
2101 /* Update cum-ack */
2102 asoc->cumulative_tsn = tsn;
2108 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2110 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2112 SCTP_STAT_INCR(sctps_recvdata);
2113 /* Set it present please */
2114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2115 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2117 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2118 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2119 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2121 /* check the special flag for stream resets */
2122 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2123 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2125 * we have finished working through the backlogged TSN's now
2126 * time to reset streams. 1: call reset function. 2: free
2127 * pending_reply space 3: distribute any chunks in
2128 * pending_reply_queue.
2130 struct sctp_queued_to_read *ctl, *nctl;
2132 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2133 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2134 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2135 SCTP_FREE(liste, SCTP_M_STRESET);
2136 /* sa_ignore FREED_MEMORY */
2137 liste = TAILQ_FIRST(&asoc->resetHead);
2138 if (TAILQ_EMPTY(&asoc->resetHead)) {
2139 /* All can be removed */
2140 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2141 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2142 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2148 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2149 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2153 * if ctl->sinfo_tsn is <= liste->tsn we can
2154 * process it which is the NOT of
2155 * ctl->sinfo_tsn > liste->tsn
2157 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2158 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2165 * Now service re-assembly to pick up anything that has been
2166 * held on reassembly queue?
2168 (void)sctp_deliver_reasm_check(stcb, asoc, strm);
2169 need_reasm_check = 0;
2171 if (need_reasm_check) {
2172 /* Another one waits ? */
2173 (void)sctp_deliver_reasm_check(stcb, asoc, strm);
/*
 * Lookup table indexed by a mapping-array byte: entry [v] is the number
 * of consecutive 1-bits starting at bit 0 of v (e.g. [0x07] = 3,
 * [0xfe] = 0).  Used when sliding the mapping arrays to find how far
 * the cumulative-ack point advances within the first non-0xff byte.
 */
2178 static const int8_t sctp_map_lookup_tab[256] = {
2179 0, 1, 0, 2, 0, 1, 0, 3,
2180 0, 1, 0, 2, 0, 1, 0, 4,
2181 0, 1, 0, 2, 0, 1, 0, 3,
2182 0, 1, 0, 2, 0, 1, 0, 5,
2183 0, 1, 0, 2, 0, 1, 0, 3,
2184 0, 1, 0, 2, 0, 1, 0, 4,
2185 0, 1, 0, 2, 0, 1, 0, 3,
2186 0, 1, 0, 2, 0, 1, 0, 6,
2187 0, 1, 0, 2, 0, 1, 0, 3,
2188 0, 1, 0, 2, 0, 1, 0, 4,
2189 0, 1, 0, 2, 0, 1, 0, 3,
2190 0, 1, 0, 2, 0, 1, 0, 5,
2191 0, 1, 0, 2, 0, 1, 0, 3,
2192 0, 1, 0, 2, 0, 1, 0, 4,
2193 0, 1, 0, 2, 0, 1, 0, 3,
2194 0, 1, 0, 2, 0, 1, 0, 7,
2195 0, 1, 0, 2, 0, 1, 0, 3,
2196 0, 1, 0, 2, 0, 1, 0, 4,
2197 0, 1, 0, 2, 0, 1, 0, 3,
2198 0, 1, 0, 2, 0, 1, 0, 5,
2199 0, 1, 0, 2, 0, 1, 0, 3,
2200 0, 1, 0, 2, 0, 1, 0, 4,
2201 0, 1, 0, 2, 0, 1, 0, 3,
2202 0, 1, 0, 2, 0, 1, 0, 6,
2203 0, 1, 0, 2, 0, 1, 0, 3,
2204 0, 1, 0, 2, 0, 1, 0, 4,
2205 0, 1, 0, 2, 0, 1, 0, 3,
2206 0, 1, 0, 2, 0, 1, 0, 5,
2207 0, 1, 0, 2, 0, 1, 0, 3,
2208 0, 1, 0, 2, 0, 1, 0, 4,
2209 0, 1, 0, 2, 0, 1, 0, 3,
2210 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Advance the cumulative-TSN point and, when enough leading bytes of the
 * receive bitmaps are fully acked, slide mapping_array/nr_mapping_array
 * down so mapping_array_base_tsn moves forward.
 *
 * The cum-ack is computed over the OR of nr_mapping_array and
 * mapping_array (see the original comment below for why both are needed
 * for fragmented messages).
 *
 * NOTE(review): this chunk was extracted with lines elided (blank lines,
 * some braces and #if/#else/#endif lines are missing), so comments below
 * annotate only the statements that are visible.
 */
2215 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2218 * Now we also need to check the mapping array in a couple of ways.
2219 * 1) Did we move the cum-ack point?
2221 * When you first glance at this you might think that all entries that
2222 * make up the postion of the cum-ack would be in the nr-mapping
2223 * array only.. i.e. things up to the cum-ack are always
2224 * deliverable. Thats true with one exception, when its a fragmented
2225 * message we may not deliver the data until some threshold (or all
2226 * of it) is in place. So we must OR the nr_mapping_array and
2227 * mapping_array to get a true picture of the cum-ack.
2229 struct sctp_association *asoc;
2232 int slide_from, slide_end, lgap, distance;
2233 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for SCTP_MAP_PREPARE_SLIDE logging. */
2237 old_cumack = asoc->cumulative_tsn;
2238 old_base = asoc->mapping_array_base_tsn;
2239 old_highest = asoc->highest_tsn_inside_map;
2241 * We could probably improve this a small bit by calculating the
2242 * offset of the current cum-ack as the starting point.
/*
 * Scan combined (nr|map) bytes from the base; sctp_map_lookup_tab[]
 * converts a byte value into the count of leading set bits, which is
 * accumulated in 'at' (the number of contiguously-acked TSNs).
 */
2245 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2246 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2250 /* there is a 0 bit */
2251 at += sctp_map_lookup_tab[val];
/* New cum-ack = base + (number of contiguously set bits) - 1. */
2255 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: cum-ack must not exceed both highest-TSN trackers.
 * NOTE(review): the INVARIANTS #if/#else lines around panic() vs.
 * SCTP_PRINTF() appear to be elided from this view.
 */
2257 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2258 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2260 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2261 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2263 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2264 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2265 sctp_print_mapping_array(asoc);
2266 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2267 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Recover (non-INVARIANTS path): clamp both trackers to the cum-ack. */
2269 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2270 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the two per-array highest-TSN trackers. */
2273 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2274 highest_tsn = asoc->highest_tsn_inside_nr_map;
2276 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything seen is contiguously acked (and at least one full
 * byte, at >= 8) -> just clear the bitmaps and reset the base.
 */
2278 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2279 /* The complete array was completed by a single FR */
2280 /* highest becomes the cum-ack */
2288 /* clear the array */
2289 clr = ((at + 7) >> 3);
2290 if (clr > asoc->mapping_array_size) {
2291 clr = asoc->mapping_array_size;
2293 memset(asoc->mapping_array, 0, clr);
2294 memset(asoc->nr_mapping_array, 0, clr);
/* Debug sweep: verify both arrays really are all-zero after clear. */
2296 for (i = 0; i < asoc->mapping_array_size; i++) {
2297 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2298 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2299 sctp_print_mapping_array(asoc);
2303 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2304 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Case 2: at least one full byte is acked but there are gaps above ->
 * slide the arrays down by slide_from bytes.
 */
2305 } else if (at >= 8) {
2306 /* we can slide the mapping array down */
2307 /* slide_from holds where we hit the first NON 0xff byte */
2310 * now calculate the ceiling of the move using our highest
2313 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2314 slide_end = (lgap >> 3);
2315 if (slide_end < slide_from) {
2316 sctp_print_mapping_array(asoc);
2318 panic("impossible slide");
2320 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2321 lgap, slide_end, slide_from, at);
2325 if (slide_end > asoc->mapping_array_size) {
2327 panic("would overrun buffer");
2329 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2330 asoc->mapping_array_size, slide_end);
2331 slide_end = asoc->mapping_array_size;
2334 distance = (slide_end - slide_from) + 1;
2335 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2336 sctp_log_map(old_base, old_cumack, old_highest,
2337 SCTP_MAP_PREPARE_SLIDE);
2338 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2339 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/*
 * If the live bytes would not fit after the move, do NOT slide now;
 * a later call (once more data arrives) may succeed.
 */
2341 if (distance + slide_from > asoc->mapping_array_size ||
2344 * Here we do NOT slide forward the array so that
2345 * hopefully when more data comes in to fill it up
2346 * we will be able to slide it forward. Really I
2347 * don't think this should happen :-0
2350 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2351 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2352 (uint32_t) asoc->mapping_array_size,
2353 SCTP_MAP_SLIDE_NONE);
/* Copy live bytes down to the front of both arrays... */
2358 for (ii = 0; ii < distance; ii++) {
2359 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2360 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ...and zero the vacated tail. */
2363 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2364 asoc->mapping_array[ii] = 0;
2365 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep "empty array" trackers (highest == base - 1) consistent with
 * the new base; slide_from bytes == slide_from << 3 TSNs.
 */
2367 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2368 asoc->highest_tsn_inside_map += (slide_from << 3);
2370 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2371 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2373 asoc->mapping_array_base_tsn += (slide_from << 3);
2374 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2375 sctp_log_map(asoc->mapping_array_base_tsn,
2376 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2377 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to send a SACK immediately or (re)start the delayed-ack
 * timer, based on gap state, duplicate TSNs, SACK frequency, and the CMT
 * DAC algorithm.  In SHUTDOWN-SENT state a SHUTDOWN + SACK are sent
 * right away instead.
 *
 * @param stcb      the association control block (caller holds the TCB lock
 *                  per the surrounding code's convention — TODO confirm,
 *                  the lock assert is not visible in this chunk).
 * @param was_a_gap nonzero if a gap existed before this packet was processed.
 *
 * NOTE(review): lines are elided from this extraction (blank lines, some
 * braces/else lines missing).
 */
2384 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2386 struct sctp_association *asoc;
2387 uint32_t highest_tsn;
/* highest_tsn = max(highest_tsn_inside_nr_map, highest_tsn_inside_map). */
2390 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2391 highest_tsn = asoc->highest_tsn_inside_nr_map;
2393 highest_tsn = asoc->highest_tsn_inside_map;
2397 * Now we need to see if we need to queue a sack or just start the
2398 * timer (if allowed).
2400 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2402 * Ok special case, in SHUTDOWN-SENT case. here we maker
2403 * sure SACK timer is off and instead send a SHUTDOWN and a
2406 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2407 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2408 stcb->sctp_ep, stcb, NULL,
2409 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* Resend SHUTDOWN to the alternate path if one is set, else primary. */
2411 sctp_send_shutdown(stcb,
2412 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2413 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2417 /* is there a gap now ? */
2418 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2421 * CMT DAC algorithm: increase number of packets received
2424 stcb->asoc.cmt_dac_pkts_rcvd++;
/*
 * Immediate-SACK conditions: explicit request, a gap just closed,
 * duplicates seen, a gap still open, delayed ack disabled, or the
 * packets-per-SACK limit was hit.
 */
2426 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2428 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2430 (stcb->asoc.numduptsns) || /* we have dup's */
2431 (is_a_gap) || /* is still a gap */
2432 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2433 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * CMT DAC exception: with CMT+DAC enabled and no other immediate-SACK
 * trigger, delay the ack instead (gap reports are tolerated under CMT
 * because reordering across paths is expected).
 */
2436 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2437 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2438 (stcb->asoc.send_sack == 0) &&
2439 (stcb->asoc.numduptsns == 0) &&
2440 (stcb->asoc.delayed_ack) &&
2441 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2444 * CMT DAC algorithm: With CMT, delay acks
2445 * even in the face of
2447 * reordering. Therefore, if acks that do not
2448 * have to be sent because of the above
2449 * reasons, will be delayed. That is, acks
2450 * that would have been sent due to gap
2451 * reports will be delayed with DAC. Start
2452 * the delayed ack timer.
2454 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2455 stcb->sctp_ep, stcb, NULL);
2458 * Ok we must build a SACK since the timer
2459 * is pending, we got our first packet OR
2460 * there are gaps or duplicates.
2462 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2463 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise: just make sure the delayed-ack timer is running. */
2466 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2467 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2468 stcb->sctp_ep, stcb, NULL);
/*
 * Walk all chunks in an inbound packet's DATA region, dispatching
 * DATA/I-DATA chunks to sctp_process_a_data_chunk() and handling
 * misplaced control chunks and unknown chunk types per the chunk-type
 * upper-bit rules.  On exit, updates last-received time, accounting,
 * and kicks SACK generation via sctp_sack_check().
 *
 * @param mm       in/out pointer to the packet mbuf chain (may be replaced
 *                 by a smaller mbuf — see cluster-compaction note below).
 * @param iphlen   IP header length (passed through to packet-drop reports).
 * @param offset   in/out byte offset of the current chunk within the chain.
 * @param length   total packet length.
 * @param high_tsn out: highest TSN handed up (seeded with cumulative_tsn).
 *
 * NOTE(review): this extraction elides lines (returns, braces, some
 * assignments such as the initial 'asoc = ...' and 'm = *mm' appear to be
 * missing), so comments annotate only what is visible.
 */
2475 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2476 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2477 struct sctp_nets *net, uint32_t * high_tsn)
2479 struct sctp_chunkhdr *ch, chunk_buf;
2480 struct sctp_association *asoc;
2481 int num_chunks = 0; /* number of control chunks processed */
2483 int chk_length, break_flag, last_chunk;
2484 int abort_flag = 0, was_a_gap;
2486 uint32_t highest_tsn;
2489 sctp_set_rwnd(stcb, &stcb->asoc);
2492 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed BEFORE processing this packet. */
2494 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2495 highest_tsn = asoc->highest_tsn_inside_nr_map;
2497 highest_tsn = asoc->highest_tsn_inside_map;
2499 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2501 * setup where we got the last DATA packet from for any SACK that
2502 * may need to go out. Don't bump the net. This is done ONLY when a
2503 * chunk is assigned.
2505 asoc->last_data_chunk_from = net;
2508 * Now before we proceed we must figure out if this is a wasted
2509 * cluster... i.e. it is a small packet sent in and yet the driver
2510 * underneath allocated a full cluster for it. If so we must copy it
2511 * to a smaller mbuf and free up the cluster mbuf. This will help
2512 * with cluster starvation. Note for __Panda__ we don't do this
2513 * since it has clusters all the way down to 64 bytes.
2515 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2516 /* we only handle mbufs that are singletons.. not chains */
2517 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2519 /* ok lets see if we can copy the data up */
2522 /* get the pointers and copy */
2523 to = mtod(m, caddr_t *);
2524 from = mtod((*mm), caddr_t *);
2525 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2526 /* copy the length and free up the old */
2527 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2529 /* sucess, back copy */
2532 /* We are in trouble in the mbuf world .. yikes */
2536 /* get pointer to the first chunk header */
2537 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2538 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2543 * process all DATA chunks...
2545 *high_tsn = asoc->cumulative_tsn;
2547 asoc->data_pkts_seen++;
/* Main per-chunk loop; stop_proc is set when the packet is exhausted
 * or a fatal condition (abort/unknown-stop) is hit. */
2548 while (stop_proc == 0) {
2549 /* validate chunk length */
2550 chk_length = ntohs(ch->chunk_length);
2551 if (length - *offset < chk_length) {
2552 /* all done, mutulated chunk */
/*
 * Protocol violations: a DATA chunk when I-DATA was negotiated (or
 * vice versa) aborts the association with an explanatory cause.
 */
2556 if ((asoc->idata_supported == 1) &&
2557 (ch->chunk_type == SCTP_DATA)) {
2558 struct mbuf *op_err;
2559 char msg[SCTP_DIAG_INFO_LEN];
2561 snprintf(msg, sizeof(msg), "I-DATA chunk received when DATA was negotiated");
2562 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2563 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2564 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2567 if ((asoc->idata_supported == 0) &&
2568 (ch->chunk_type == SCTP_IDATA)) {
2569 struct mbuf *op_err;
2570 char msg[SCTP_DIAG_INFO_LEN];
2572 snprintf(msg, sizeof(msg), "DATA chunk received when I-DATA was negotiated");
2573 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2574 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2575 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2578 if ((ch->chunk_type == SCTP_DATA) ||
2579 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum length check depends on DATA vs. I-DATA header size. */
2582 if (ch->chunk_type == SCTP_DATA) {
2583 clen = sizeof(struct sctp_data_chunk);
2585 clen = sizeof(struct sctp_idata_chunk);
2587 if ((size_t)chk_length < clen) {
2589 * Need to send an abort since we had a
2590 * invalid data chunk.
2592 struct mbuf *op_err;
2593 char msg[SCTP_DIAG_INFO_LEN];
2595 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2597 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2598 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2599 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2602 #ifdef SCTP_AUDITING_ENABLED
2603 sctp_audit_log(0xB1, 0);
/* last_chunk: true when this (padded) chunk exactly ends the packet. */
2605 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2610 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2611 chk_length, net, high_tsn, &abort_flag, &break_flag,
2612 last_chunk, ch->chunk_type)) {
2620 * Set because of out of rwnd space and no
2621 * drop rep space left.
2627 /* not a data chunk in the data region */
2628 switch (ch->chunk_type) {
2629 case SCTP_INITIATION:
2630 case SCTP_INITIATION_ACK:
2631 case SCTP_SELECTIVE_ACK:
2632 case SCTP_NR_SELECTIVE_ACK:
2633 case SCTP_HEARTBEAT_REQUEST:
2634 case SCTP_HEARTBEAT_ACK:
2635 case SCTP_ABORT_ASSOCIATION:
2637 case SCTP_SHUTDOWN_ACK:
2638 case SCTP_OPERATION_ERROR:
2639 case SCTP_COOKIE_ECHO:
2640 case SCTP_COOKIE_ACK:
2643 case SCTP_SHUTDOWN_COMPLETE:
2644 case SCTP_AUTHENTICATION:
2645 case SCTP_ASCONF_ACK:
2646 case SCTP_PACKET_DROPPED:
2647 case SCTP_STREAM_RESET:
2648 case SCTP_FORWARD_CUM_TSN:
2651 * Now, what do we do with KNOWN chunks that
2652 * are NOT in the right place?
2654 * For now, I do nothing but ignore them. We
2655 * may later want to add sysctl stuff to
2656 * switch out and do either an ABORT() or
2657 * possibly process them.
2659 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2660 struct mbuf *op_err;
2661 char msg[SCTP_DIAG_INFO_LEN];
2663 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2665 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2666 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2671 /* unknown chunk type, use bit rules */
/* High bit 0x40 set: report the unrecognized chunk to the peer. */
2672 if (ch->chunk_type & 0x40) {
2673 /* Add a error report to the queue */
2674 struct mbuf *op_err;
2675 struct sctp_gen_error_cause *cause;
2677 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2678 0, M_NOWAIT, 1, MT_DATA);
2679 if (op_err != NULL) {
2680 cause = mtod(op_err, struct sctp_gen_error_cause *);
2681 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2682 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2683 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2684 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2685 if (SCTP_BUF_NEXT(op_err) != NULL) {
2686 sctp_queue_op_err(stcb, op_err);
2688 sctp_m_freem(op_err);
/* Top bit 0x80 clear: stop processing the rest of the packet. */
2692 if ((ch->chunk_type & 0x80) == 0) {
2693 /* discard the rest of this packet */
2695 } /* else skip this bad chunk and
2698 } /* switch of chunk type */
/* Advance to the next 4-byte-aligned chunk boundary. */
2700 *offset += SCTP_SIZE32(chk_length);
2701 if ((*offset >= length) || stop_proc) {
2702 /* no more data left in the mbuf chain */
2706 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2707 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2716 * we need to report rwnd overrun drops.
2718 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2722 * Did we get data, if so update the time for auto-close and
2723 * give peer credit for being alive.
2725 SCTP_STAT_INCR(sctps_recvpktwithdata);
2726 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2727 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2728 stcb->asoc.overall_error_count,
2730 SCTP_FROM_SCTP_INDATA,
/* Valid data arrived: reset the error count and stamp last-rcvd time. */
2733 stcb->asoc.overall_error_count = 0;
2734 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2736 /* now service all of the reassm queue if needed */
2737 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2738 /* Assure that we ack right away */
2739 stcb->asoc.send_sack = 1;
2741 /* Start a sack timer or QUEUE a SACK for sending */
2742 sctp_sack_check(stcb, was_a_gap);
/*
 * Process one SACK gap-ack block [last_tsn+frag_strt, last_tsn+frag_end]
 * against the sent queue: mark matching chunks as (NR-)acked, update
 * flight size / rwnd accounting, RTT samples, CMT pseudo-cumack and
 * SFR/DAC trackers, and biggest_newly_acked_tsn.
 *
 * @param p_tp1      in/out cursor into the sent queue, preserved across
 *                   successive gap blocks to avoid rescanning from the head.
 * @param nr_sacking nonzero when processing an NR gap block (chunks may be
 *                   freed / NR-marked rather than just MARKED).
 * @return wake_him  nonzero if data was freed (used by nr-sack callers).
 *
 * NOTE(review): this extraction elides lines (blank lines, some braces and
 * statements), so only visible statements are annotated.
 */
2747 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2748 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2750 uint32_t * biggest_newly_acked_tsn,
2751 uint32_t * this_sack_lowest_newack,
2754 struct sctp_tmit_chunk *tp1;
2755 unsigned int theTSN;
2756 int j, wake_him = 0, circled = 0;
2758 /* Recover the tp1 we last saw */
2761 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Walk every TSN covered by this gap block. */
2763 for (j = frag_strt; j <= frag_end; j++) {
2764 theTSN = j + last_tsn;
2766 if (tp1->rec.data.doing_fast_retransmit)
2770 * CMT: CUCv2 algorithm. For each TSN being
2771 * processed from the sent queue, track the
2772 * next expected pseudo-cumack, or
2773 * rtx_pseudo_cumack, if required. Separate
2774 * cumack trackers for first transmissions,
2775 * and retransmissions.
2777 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2778 (tp1->whoTo->find_pseudo_cumack == 1) &&
2779 (tp1->snd_count == 1)) {
2780 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2781 tp1->whoTo->find_pseudo_cumack = 0;
2783 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2784 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2785 (tp1->snd_count > 1)) {
2786 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2787 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk carrying this gap-acked TSN. */
2789 if (tp1->rec.data.TSN_seq == theTSN) {
2790 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2792 * must be held until
2795 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2797 * If it is less than RESEND, it is
2798 * now no-longer in flight.
2799 * Higher values may already be set
2800 * via previous Gap Ack Blocks...
2801 * i.e. ACKED or RESEND.
2803 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2804 *biggest_newly_acked_tsn)) {
2805 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2808 * CMT: SFR algo (and HTNA) - set
2809 * saw_newack to 1 for dest being
2810 * newly acked. update
2811 * this_sack_highest_newack if
2814 if (tp1->rec.data.chunk_was_revoked == 0)
2815 tp1->whoTo->saw_newack = 1;
2817 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2818 tp1->whoTo->this_sack_highest_newack)) {
2819 tp1->whoTo->this_sack_highest_newack =
2820 tp1->rec.data.TSN_seq;
2823 * CMT DAC algo: also update
2824 * this_sack_lowest_newack
2826 if (*this_sack_lowest_newack == 0) {
2827 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2828 sctp_log_sack(*this_sack_lowest_newack,
2830 tp1->rec.data.TSN_seq,
2833 SCTP_LOG_TSN_ACKED);
2835 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2838 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2839 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2840 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2841 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2842 * Separate pseudo_cumack trackers for first transmissions and
2845 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2846 if (tp1->rec.data.chunk_was_revoked == 0) {
2847 tp1->whoTo->new_pseudo_cumack = 1;
2849 tp1->whoTo->find_pseudo_cumack = 1;
2851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2852 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2854 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2855 if (tp1->rec.data.chunk_was_revoked == 0) {
2856 tp1->whoTo->new_pseudo_cumack = 1;
2858 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2861 sctp_log_sack(*biggest_newly_acked_tsn,
2863 tp1->rec.data.TSN_seq,
2866 SCTP_LOG_TSN_ACKED);
2868 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2869 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2870 tp1->whoTo->flight_size,
2872 (uint32_t) (uintptr_t) tp1->whoTo,
2873 tp1->rec.data.TSN_seq);
/* Newly acked: remove from per-dest and total flight accounting. */
2875 sctp_flight_size_decrease(tp1);
2876 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2877 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2880 sctp_total_flight_decrease(stcb, tp1);
2882 tp1->whoTo->net_ack += tp1->send_size;
2883 if (tp1->snd_count < 2) {
2885 * True non-retransmited chunk
2887 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample — only meaningful for chunks sent exactly once
 * (the elided guard presumably checks that; TODO confirm). */
2895 sctp_calculate_rto(stcb,
2898 &tp1->sent_rcv_time,
2899 sctp_align_safe_nocopy,
2900 SCTP_RTT_FROM_DATA);
2903 if (tp1->whoTo->rto_needed == 0) {
2904 tp1->whoTo->rto_needed = 1;
2910 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2911 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2912 stcb->asoc.this_sack_highest_gap)) {
2913 stcb->asoc.this_sack_highest_gap =
2914 tp1->rec.data.TSN_seq;
2916 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2917 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2918 #ifdef SCTP_AUDITING_ENABLED
2919 sctp_audit_log(0xB2,
2920 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2925 * All chunks NOT UNSENT fall through here and are marked
2926 * (leave PR-SCTP ones that are to skip alone though)
2928 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2929 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2930 tp1->sent = SCTP_DATAGRAM_MARKED;
2932 if (tp1->rec.data.chunk_was_revoked) {
2933 /* deflate the cwnd */
2934 tp1->whoTo->cwnd -= tp1->book_size;
2935 tp1->rec.data.chunk_was_revoked = 0;
2937 /* NR Sack code here */
/* NR gap block: mark NR-ACKED and release the chunk's data. */
2939 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2940 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2941 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2944 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2947 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2948 (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2949 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2950 stcb->asoc.trigger_reset = 1;
2952 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2958 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2959 sctp_m_freem(tp1->data);
2966 } /* if (tp1->TSN_seq == theTSN) */
/* Sent queue is TSN-ordered: once past theTSN, stop scanning for it. */
2967 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2970 tp1 = TAILQ_NEXT(tp1, sctp_next);
2971 if ((tp1 == NULL) && (circled == 0)) {
2973 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2975 } /* end while (tp1) */
2978 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2980 /* In case the fragments were not in order we must reset */
2981 } /* end for (j = fragStart */
2983 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over all gap-ack blocks (revocable then non-revocable/NR) in a
 * received SACK/NR-SACK, validating each block and delegating the actual
 * per-TSN marking to sctp_process_segment_range().
 *
 * @param num_seg / num_nr_seg  counts of revocable and NR gap blocks.
 * @param biggest_tsn_acked     out: highest TSN covered by any block.
 * @return nonzero if any chunk's data was freed (NR-sack wakeup hint).
 *
 * NOTE(review): lines are elided from this extraction; the switch from
 * revocable to non-revocable processing (presumably at i == num_seg) is
 * among the missing lines — confirm against the full source.
 */
2988 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2989 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2990 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2991 int num_seg, int num_nr_seg, int *rto_ok)
2993 struct sctp_gap_ack_block *frag, block;
2994 struct sctp_tmit_chunk *tp1;
2999 uint16_t frag_strt, frag_end, prev_frag_end;
3001 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3005 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3008 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap block out of the SACK chunk (copied into 'block'). */
3010 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3011 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3012 *offset += sizeof(block);
3014 return (chunk_freed);
3016 frag_strt = ntohs(frag->start);
3017 frag_end = ntohs(frag->end);
/* Validate block ordering; offsets are relative to last_tsn (cum-ack). */
3019 if (frag_strt > frag_end) {
3020 /* This gap report is malformed, skip it. */
3023 if (frag_strt <= prev_frag_end) {
3024 /* This gap report is not in order, so restart. */
3025 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3027 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3028 *biggest_tsn_acked = last_tsn + frag_end;
3035 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3036 non_revocable, &num_frs, biggest_newly_acked_tsn,
3037 this_sack_lowest_newack, rto_ok)) {
3040 prev_frag_end = frag_end;
3042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3044 sctp_log_fr(*biggest_tsn_acked,
3045 *biggest_newly_acked_tsn,
3046 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3048 return (chunk_freed);
/*
 * Detect revoked TSNs: chunks above the cum-ack that were previously
 * gap-ACKED but are NOT covered by this SACK's gap blocks have been
 * "revoked" by the peer and must be put back in flight; chunks MARKED
 * by this SACK are promoted to ACKED.
 *
 * @param cumack             the SACK's cumulative TSN ack point.
 * @param biggest_tsn_acked  highest TSN covered by this SACK's gap blocks;
 *                           chunks above it are skipped (visible loop shows
 *                           the comparison — the elided line presumably
 *                           breaks/continues there; TODO confirm).
 *
 * NOTE(review): lines are elided from this extraction (blank lines and
 * the function tail are missing).
 */
3052 sctp_check_for_revoked(struct sctp_tcb *stcb,
3053 struct sctp_association *asoc, uint32_t cumack,
3054 uint32_t biggest_tsn_acked)
3056 struct sctp_tmit_chunk *tp1;
3058 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3059 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3061 * ok this guy is either ACK or MARKED. If it is
3062 * ACKED it has been previously acked but not this
3063 * time i.e. revoked. If it is MARKED it was ACK'ed
3066 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3069 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3070 /* it has been revoked */
3071 tp1->sent = SCTP_DATAGRAM_SENT;
3072 tp1->rec.data.chunk_was_revoked = 1;
3074 * We must add this stuff back in to assure
3075 * timers and such get started.
3077 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3078 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3079 tp1->whoTo->flight_size,
3081 (uint32_t) (uintptr_t) tp1->whoTo,
3082 tp1->rec.data.TSN_seq);
/* Revoked chunk counts as in-flight again. */
3084 sctp_flight_size_increase(tp1);
3085 sctp_total_flight_increase(stcb, tp1);
3087 * We inflate the cwnd to compensate for our
3088 * artificial inflation of the flight_size.
3090 tp1->whoTo->cwnd += tp1->book_size;
3091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3092 sctp_log_sack(asoc->last_acked_seq,
3094 tp1->rec.data.TSN_seq,
3097 SCTP_LOG_TSN_REVOKED);
3099 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3100 /* it has been re-acked in this SACK */
3101 tp1->sent = SCTP_DATAGRAM_ACKED;
3104 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3111 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3112 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3114 struct sctp_tmit_chunk *tp1;
3115 int strike_flag = 0;
3117 int tot_retrans = 0;
3118 uint32_t sending_seq;
3119 struct sctp_nets *net;
3120 int num_dests_sacked = 0;
3123 * select the sending_seq, this is either the next thing ready to be
3124 * sent but not transmitted, OR, the next seq we assign.
3126 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3128 sending_seq = asoc->sending_seq;
3130 sending_seq = tp1->rec.data.TSN_seq;
3133 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3134 if ((asoc->sctp_cmt_on_off > 0) &&
3135 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3136 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3137 if (net->saw_newack)
3141 if (stcb->asoc.prsctp_supported) {
3142 (void)SCTP_GETTIME_TIMEVAL(&now);
3144 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3146 if (tp1->no_fr_allowed) {
3147 /* this one had a timeout or something */
3150 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3151 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3152 sctp_log_fr(biggest_tsn_newly_acked,
3153 tp1->rec.data.TSN_seq,
3155 SCTP_FR_LOG_CHECK_STRIKE);
3157 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3158 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3162 if (stcb->asoc.prsctp_supported) {
3163 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3164 /* Is it expired? */
3165 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3166 /* Yes so drop it */
3167 if (tp1->data != NULL) {
3168 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3169 SCTP_SO_NOT_LOCKED);
3175 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3176 /* we are beyond the tsn in the sack */
3179 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3180 /* either a RESEND, ACKED, or MARKED */
3182 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3183 /* Continue strikin FWD-TSN chunks */
3184 tp1->rec.data.fwd_tsn_cnt++;
3189 * CMT : SFR algo (covers part of DAC and HTNA as well)
3191 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3193 * No new acks were receieved for data sent to this
3194 * dest. Therefore, according to the SFR algo for
3195 * CMT, no data sent to this dest can be marked for
3196 * FR using this SACK.
3199 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3200 tp1->whoTo->this_sack_highest_newack)) {
3202 * CMT: New acks were receieved for data sent to
3203 * this dest. But no new acks were seen for data
3204 * sent after tp1. Therefore, according to the SFR
3205 * algo for CMT, tp1 cannot be marked for FR using
3206 * this SACK. This step covers part of the DAC algo
3207 * and the HTNA algo as well.
3212 * Here we check to see if we were have already done a FR
3213 * and if so we see if the biggest TSN we saw in the sack is
3214 * smaller than the recovery point. If so we don't strike
3215 * the tsn... otherwise we CAN strike the TSN.
3218 * @@@ JRI: Check for CMT if (accum_moved &&
3219 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3222 if (accum_moved && asoc->fast_retran_loss_recovery) {
3224 * Strike the TSN if in fast-recovery and cum-ack
3227 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3228 sctp_log_fr(biggest_tsn_newly_acked,
3229 tp1->rec.data.TSN_seq,
3231 SCTP_FR_LOG_STRIKE_CHUNK);
3233 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3236 if ((asoc->sctp_cmt_on_off > 0) &&
3237 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3239 * CMT DAC algorithm: If SACK flag is set to
3240 * 0, then lowest_newack test will not pass
3241 * because it would have been set to the
3242 * cumack earlier. If not already to be
3243 * rtx'd, If not a mixed sack and if tp1 is
3244 * not between two sacked TSNs, then mark by
3245 * one more. NOTE that we are marking by one
3246 * additional time since the SACK DAC flag
3247 * indicates that two packets have been
3248 * received after this missing TSN.
3250 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3251 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3252 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3253 sctp_log_fr(16 + num_dests_sacked,
3254 tp1->rec.data.TSN_seq,
3256 SCTP_FR_LOG_STRIKE_CHUNK);
3261 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3262 (asoc->sctp_cmt_on_off == 0)) {
3264 * For those that have done a FR we must take
3265 * special consideration if we strike. I.e the
3266 * biggest_newly_acked must be higher than the
3267 * sending_seq at the time we did the FR.
3270 #ifdef SCTP_FR_TO_ALTERNATE
3272 * If FR's go to new networks, then we must only do
3273 * this for singly homed asoc's. However if the FR's
3274 * go to the same network (Armando's work) then its
3275 * ok to FR multiple times.
3283 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3284 tp1->rec.data.fast_retran_tsn)) {
3286 * Strike the TSN, since this ack is
3287 * beyond where things were when we
3290 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3291 sctp_log_fr(biggest_tsn_newly_acked,
3292 tp1->rec.data.TSN_seq,
3294 SCTP_FR_LOG_STRIKE_CHUNK);
3296 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3300 if ((asoc->sctp_cmt_on_off > 0) &&
3301 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3303 * CMT DAC algorithm: If
3304 * SACK flag is set to 0,
3305 * then lowest_newack test
3306 * will not pass because it
3307 * would have been set to
3308 * the cumack earlier. If
3309 * not already to be rtx'd,
3310 * If not a mixed sack and
3311 * if tp1 is not between two
3312 * sacked TSNs, then mark by
3313 * one more. NOTE that we
3314 * are marking by one
3315 * additional time since the
3316 * SACK DAC flag indicates
3317 * that two packets have
3318 * been received after this
3321 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3322 (num_dests_sacked == 1) &&
3323 SCTP_TSN_GT(this_sack_lowest_newack,
3324 tp1->rec.data.TSN_seq)) {
3325 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3326 sctp_log_fr(32 + num_dests_sacked,
3327 tp1->rec.data.TSN_seq,
3329 SCTP_FR_LOG_STRIKE_CHUNK);
3331 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3339 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3342 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3343 biggest_tsn_newly_acked)) {
3345 * We don't strike these: This is the HTNA
3346 * algorithm i.e. we don't strike If our TSN is
3347 * larger than the Highest TSN Newly Acked.
3351 /* Strike the TSN */
3352 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3353 sctp_log_fr(biggest_tsn_newly_acked,
3354 tp1->rec.data.TSN_seq,
3356 SCTP_FR_LOG_STRIKE_CHUNK);
3358 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3361 if ((asoc->sctp_cmt_on_off > 0) &&
3362 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3364 * CMT DAC algorithm: If SACK flag is set to
3365 * 0, then lowest_newack test will not pass
3366 * because it would have been set to the
3367 * cumack earlier. If not already to be
3368 * rtx'd, If not a mixed sack and if tp1 is
3369 * not between two sacked TSNs, then mark by
3370 * one more. NOTE that we are marking by one
3371 * additional time since the SACK DAC flag
3372 * indicates that two packets have been
3373 * received after this missing TSN.
3375 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3376 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3377 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3378 sctp_log_fr(48 + num_dests_sacked,
3379 tp1->rec.data.TSN_seq,
3381 SCTP_FR_LOG_STRIKE_CHUNK);
3387 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3388 struct sctp_nets *alt;
3390 /* fix counts and things */
3391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3392 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3393 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3395 (uint32_t) (uintptr_t) tp1->whoTo,
3396 tp1->rec.data.TSN_seq);
3399 tp1->whoTo->net_ack++;
3400 sctp_flight_size_decrease(tp1);
3401 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3402 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3407 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3408 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3410 /* add back to the rwnd */
3411 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3413 /* remove from the total flight */
3414 sctp_total_flight_decrease(stcb, tp1);
3416 if ((stcb->asoc.prsctp_supported) &&
3417 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3419 * Has it been retransmitted tv_sec times? -
3420 * we store the retran count there.
3422 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3423 /* Yes, so drop it */
3424 if (tp1->data != NULL) {
3425 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3426 SCTP_SO_NOT_LOCKED);
3428 /* Make sure to flag we had a FR */
3429 tp1->whoTo->net_ack++;
3434 * SCTP_PRINTF("OK, we are now ready to FR this
3437 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3438 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3442 /* This is a subsequent FR */
3443 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3445 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3446 if (asoc->sctp_cmt_on_off > 0) {
3448 * CMT: Using RTX_SSTHRESH policy for CMT.
3449 * If CMT is being used, then pick dest with
3450 * largest ssthresh for any retransmission.
3452 tp1->no_fr_allowed = 1;
3454 /* sa_ignore NO_NULL_CHK */
3455 if (asoc->sctp_cmt_pf > 0) {
3457 * JRS 5/18/07 - If CMT PF is on,
3458 * use the PF version of
3461 alt = sctp_find_alternate_net(stcb, alt, 2);
3464 * JRS 5/18/07 - If only CMT is on,
3465 * use the CMT version of
3468 /* sa_ignore NO_NULL_CHK */
3469 alt = sctp_find_alternate_net(stcb, alt, 1);
3475 * CUCv2: If a different dest is picked for
3476 * the retransmission, then new
3477 * (rtx-)pseudo_cumack needs to be tracked
3478 * for orig dest. Let CUCv2 track new (rtx-)
3479 * pseudo-cumack always.
3482 tp1->whoTo->find_pseudo_cumack = 1;
3483 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3485 } else {/* CMT is OFF */
3487 #ifdef SCTP_FR_TO_ALTERNATE
3488 /* Can we find an alternate? */
3489 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3492 * default behavior is to NOT retransmit
3493 * FR's to an alternate. Armando Caro's
3494 * paper details why.
3500 tp1->rec.data.doing_fast_retransmit = 1;
3502 /* mark the sending seq for possible subsequent FR's */
3504 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3505 * (uint32_t)tpi->rec.data.TSN_seq);
3507 if (TAILQ_EMPTY(&asoc->send_queue)) {
3509 * If the queue of send is empty then its
3510 * the next sequence number that will be
3511 * assigned so we subtract one from this to
3512 * get the one we last sent.
3514 tp1->rec.data.fast_retran_tsn = sending_seq;
3517 * If there are chunks on the send queue
3518 * (unsent data that has made it from the
3519 * stream queues but not out the door, we
3520 * take the first one (which will have the
3521 * lowest TSN) and subtract one to get the
3524 struct sctp_tmit_chunk *ttt;
3526 ttt = TAILQ_FIRST(&asoc->send_queue);
3527 tp1->rec.data.fast_retran_tsn =
3528 ttt->rec.data.TSN_seq;
3533 * this guy had a RTO calculation pending on
3536 if ((tp1->whoTo != NULL) &&
3537 (tp1->whoTo->rto_needed == 0)) {
3538 tp1->whoTo->rto_needed = 1;
3542 if (alt != tp1->whoTo) {
3543 /* yes, there is an alternate. */
3544 sctp_free_remote_addr(tp1->whoTo);
3545 /* sa_ignore FREED_MEMORY */
3547 atomic_add_int(&alt->ref_count, 1);
/*
 * sctp_try_advance_peer_ack_point():
 * Walk the head of the sent_queue and try to move the PR-SCTP
 * "Advanced Peer Ack Point" forward over chunks marked
 * SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED, releasing
 * TTL-expired resend-marked PR-SCTP chunks along the way.
 * NOTE(review): the integer starting each line is the original
 * file's line number; gaps in that numbering are lines elided from
 * this extract, so braces and return statements may appear
 * unbalanced here.  Comments below describe visible code only.
 */
3553 struct sctp_tmit_chunk *
3554 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3555 struct sctp_association *asoc)
/* a_adv presumably tracks the last chunk advanced over (assignments elided). */
3557 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* PR-SCTP not negotiated: nothing can be advanced (branch body elided). */
3561 if (asoc->prsctp_supported == 0) {
3564 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
/* Only skip-marked, resend-marked or NR-acked chunks may be passed over. */
3565 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3566 tp1->sent != SCTP_DATAGRAM_RESEND &&
3567 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3568 /* no chance to advance, out of here */
3571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3572 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3573 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3574 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3575 asoc->advanced_peer_ack_point,
3576 tp1->rec.data.TSN_seq, 0, 0);
/* Reliable (non-PR) chunks are a hard wall for the ack point. */
3579 if (!PR_SCTP_ENABLED(tp1->flags)) {
3581 * We can't fwd-tsn past any that are reliable aka
3582 * retransmitted until the asoc fails.
3587 (void)SCTP_GETTIME_TIMEVAL(&now);
3591 * now we got a chunk which is marked for another
3592 * retransmission to a PR-stream but has run out its chances
3593 * already maybe OR has been marked to skip now. Can we skip
3594 * it if its a resend?
3596 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3597 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3599 * Now is this one marked for resend and its time is
/* TTL policy: lifetime expired, so release the chunk and keep going. */
3602 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3603 /* Yes so drop it */
3605 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3606 1, SCTP_SO_NOT_LOCKED);
3610 * No, we are done when hit one for resend
3611 * whos time as not expired.
3617 * Ok now if this chunk is marked to drop it we can clean up
3618 * the chunk, advance our peer ack point and we can check
3621 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3622 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3623 /* advance PeerAckPoint goes forward */
3624 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3625 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3627 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3628 /* No update but we do save the chk */
3633 * If it is still in RESEND we can advance no
/*
 * sctp_fs_audit():
 * Consistency audit of flight-size bookkeeping: walk the whole
 * sent_queue, tally chunks by their ->sent state, and compare the
 * tallies against the association's total_flight/total_flight_count
 * captured on entry.  On a mismatch it either panics (visible
 * panic() call -- presumably under an INVARIANTS-style build) or
 * prints the discrepancy.  Return-type line and return statements
 * are elided in this extract (gaps in the embedded line numbers).
 */
3643 sctp_fs_audit(struct sctp_association *asoc)
3645 struct sctp_tmit_chunk *chk;
3646 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3650 int entry_flight, entry_cnt;
/* Snapshot totals before walking so the report reflects entry state. */
3656 entry_flight = asoc->total_flight;
3657 entry_cnt = asoc->total_flight_count;
/* All outstanding chunks are PR-SCTP: early-out (branch body elided). */
3659 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Classify each sent chunk; the counter-increment lines are elided here. */
3662 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3663 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3664 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3665 chk->rec.data.TSN_seq,
3669 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3671 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3673 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in-flight or in-between is an accounting bug. */
3680 if ((inflight > 0) || (inbetween > 0)) {
3682 panic("Flight size-express incorrect? \n");
3684 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3685 entry_flight, entry_cnt);
3687 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3688 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery():
 * Clear tp1's window-probe status.  If the chunk is already at or
 * past ACKED (or its data is gone) only a diagnostic is logged and
 * the chunk is NOT moved back; otherwise its flight-size accounting
 * is undone via the CC hook and flight decrease helpers, and the
 * chunk is re-marked SCTP_DATAGRAM_RESEND so the probe data gets
 * retransmitted.  (Return-type line and some statements are elided
 * in this extract -- see gaps in the embedded line numbers.)
 */
3697 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3698 struct sctp_association *asoc,
3699 struct sctp_tmit_chunk *tp1)
3701 tp1->window_probe = 0;
3702 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3703 /* TSN's skipped we do NOT move back. */
3704 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3705 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3707 (uint32_t) (uintptr_t) tp1->whoTo,
3708 tp1->rec.data.TSN_seq);
3711 /* First setup this by shrinking flight */
/* Optional CC-module hook: let the CC algorithm see the un-acking. */
3712 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3713 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3716 sctp_flight_size_decrease(tp1);
3717 sctp_total_flight_decrease(stcb, tp1);
3718 /* Now mark for resend */
3719 tp1->sent = SCTP_DATAGRAM_RESEND;
3720 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3722 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3723 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3724 tp1->whoTo->flight_size,
3726 (uint32_t) (uintptr_t) tp1->whoTo,
3727 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack():
 * Fast-path SACK processing for the common case of a SACK carrying
 * only a cumulative ack (no gap-ack blocks): frees newly cum-acked
 * chunks, updates per-net and association flight/rwnd accounting,
 * runs the CC-module hooks, restarts or stops T3-send timers,
 * performs window-probe recovery, drives shutdown progression and
 * PR-SCTP FWD-TSN generation.
 * NOTE(review): the integer starting each line is the original
 * file's line number; gaps in that numbering mark lines elided from
 * this extract, so some declarations (e.g. old_rwnd, send_s,
 * to_ticks) and closing braces are not visible.  Comments describe
 * visible code only.
 */
3732 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3733 uint32_t rwnd, int *abort_now, int ecne_seen)
3735 struct sctp_nets *net;
3736 struct sctp_association *asoc;
3737 struct sctp_tmit_chunk *tp1, *tp2;
3739 int win_probe_recovery = 0;
3740 int win_probe_recovered = 0;
3741 int j, done_once = 0;
3744 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3745 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3746 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3748 SCTP_TCB_LOCK_ASSERT(stcb);
3749 #ifdef SCTP_ASOCLOG_OF_TSNS
3750 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3751 stcb->asoc.cumack_log_at++;
3752 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3753 stcb->asoc.cumack_log_at = 0;
3757 old_rwnd = asoc->peers_rwnd;
/* Old (behind last_acked_seq) SACK: discard (branch body elided). */
3758 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3761 } else if (asoc->last_acked_seq == cumack) {
3762 /* Window update sack */
3763 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3764 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3765 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3766 /* SWS sender side engages */
3767 asoc->peers_rwnd = 0;
3769 if (asoc->peers_rwnd > old_rwnd) {
3774 /* First setup for CC stuff */
3775 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3776 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3777 /* Drag along the window_tsn for cwr's */
3778 net->cwr_window_tsn = cumack;
3780 net->prev_cwnd = net->cwnd;
3785 * CMT: Reset CUC and Fast recovery algo variables before
3788 net->new_pseudo_cumack = 0;
3789 net->will_exit_fast_recovery = 0;
3790 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3791 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Strict checking: a cum-ack at/past our sending_seq is a protocol
 * violation and aborts the association. */
3794 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3797 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3798 tp1 = TAILQ_LAST(&asoc->sent_queue,
3799 sctpchunk_listhead);
3800 send_s = tp1->rec.data.TSN_seq + 1;
3802 send_s = asoc->sending_seq;
3804 if (SCTP_TSN_GE(cumack, send_s)) {
3805 struct mbuf *op_err;
3806 char msg[SCTP_DIAG_INFO_LEN];
3810 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3812 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3813 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3814 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3818 asoc->this_sack_highest_gap = cumack;
3819 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3820 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3821 stcb->asoc.overall_error_count,
3823 SCTP_FROM_SCTP_INDATA,
/* A valid SACK clears the association's error counter. */
3826 stcb->asoc.overall_error_count = 0;
3827 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3828 /* process the new consecutive TSN first */
3829 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3830 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3831 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3832 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3834 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3836 * If it is less than ACKED, it is
3837 * now no-longer in flight. Higher
3838 * values may occur during marking
3840 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3841 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3842 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3843 tp1->whoTo->flight_size,
3845 (uint32_t) (uintptr_t) tp1->whoTo,
3846 tp1->rec.data.TSN_seq);
3848 sctp_flight_size_decrease(tp1);
3849 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3850 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3853 /* sa_ignore NO_NULL_CHK */
3854 sctp_total_flight_decrease(stcb, tp1);
3856 tp1->whoTo->net_ack += tp1->send_size;
3857 if (tp1->snd_count < 2) {
3859 * True non-retransmited
3862 tp1->whoTo->net_ack2 +=
3865 /* update RTO too? */
3874 sctp_calculate_rto(stcb,
3876 &tp1->sent_rcv_time,
3877 sctp_align_safe_nocopy,
3878 SCTP_RTT_FROM_DATA);
3881 if (tp1->whoTo->rto_needed == 0) {
3882 tp1->whoTo->rto_needed = 1;
3888 * CMT: CUCv2 algorithm. From the
3889 * cumack'd TSNs, for each TSN being
3890 * acked for the first time, set the
3891 * following variables for the
3892 * corresp destination.
3893 * new_pseudo_cumack will trigger a
3895 * find_(rtx_)pseudo_cumack will
3896 * trigger search for the next
3897 * expected (rtx-)pseudo-cumack.
3899 tp1->whoTo->new_pseudo_cumack = 1;
3900 tp1->whoTo->find_pseudo_cumack = 1;
3901 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3903 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3904 /* sa_ignore NO_NULL_CHK */
3905 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3908 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3909 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3911 if (tp1->rec.data.chunk_was_revoked) {
3912 /* deflate the cwnd */
3913 tp1->whoTo->cwnd -= tp1->book_size;
3914 tp1->rec.data.chunk_was_revoked = 0;
/* NR-acked chunks already dropped their stream-queue count earlier. */
3916 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3917 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3918 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3921 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* Pending stream reset can proceed once this stream fully drains. */
3925 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3926 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3927 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3928 asoc->trigger_reset = 1;
3930 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3932 /* sa_ignore NO_NULL_CHK */
3933 sctp_free_bufspace(stcb, asoc, tp1, 1);
3934 sctp_m_freem(tp1->data);
3937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3938 sctp_log_sack(asoc->last_acked_seq,
3940 tp1->rec.data.TSN_seq,
3943 SCTP_LOG_FREE_SENT);
3945 asoc->sent_queue_cnt--;
3946 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Space was freed in the send buffer: wake any writer.  On Apple /
 * lock-testing builds the socket lock must be taken with the TCB
 * lock dropped, so the refcount guards against the assoc vanishing. */
3953 /* sa_ignore NO_NULL_CHK */
3954 if (stcb->sctp_socket) {
3955 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3959 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3960 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3961 /* sa_ignore NO_NULL_CHK */
3962 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3964 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3965 so = SCTP_INP_SO(stcb->sctp_ep);
3966 atomic_add_int(&stcb->asoc.refcnt, 1);
3967 SCTP_TCB_UNLOCK(stcb);
3968 SCTP_SOCKET_LOCK(so, 1);
3969 SCTP_TCB_LOCK(stcb);
3970 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3971 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3972 /* assoc was freed while we were unlocked */
3973 SCTP_SOCKET_UNLOCK(so, 1);
3977 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3978 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3979 SCTP_SOCKET_UNLOCK(so, 1);
3982 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3983 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3987 /* JRS - Use the congestion control given in the CC module */
3988 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3989 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3990 if (net->net_ack2 > 0) {
3992 * Karn's rule applies to clearing error
3993 * count, this is optional.
3995 net->error_count = 0;
3996 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3997 /* addr came good */
3998 net->dest_state |= SCTP_ADDR_REACHABLE;
3999 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4000 0, (void *)net, SCTP_SO_NOT_LOCKED);
4002 if (net == stcb->asoc.primary_destination) {
4003 if (stcb->asoc.alternate) {
4005 * release the alternate,
4008 sctp_free_remote_addr(stcb->asoc.alternate);
4009 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart heartbeats and
 * let the CC module adjust cwnd on PF exit. */
4012 if (net->dest_state & SCTP_ADDR_PF) {
4013 net->dest_state &= ~SCTP_ADDR_PF;
4014 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4015 stcb->sctp_ep, stcb, net,
4016 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4017 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4018 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4019 /* Done with this net */
4022 /* restore any doubled timers */
4023 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4024 if (net->RTO < stcb->asoc.minrto) {
4025 net->RTO = stcb->asoc.minrto;
4027 if (net->RTO > stcb->asoc.maxrto) {
4028 net->RTO = stcb->asoc.maxrto;
4032 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4034 asoc->last_acked_seq = cumack;
4036 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4037 /* nothing left in-flight */
4038 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4039 net->flight_size = 0;
4040 net->partial_bytes_acked = 0;
4042 asoc->total_flight = 0;
4043 asoc->total_flight_count = 0;
/* Recompute peer rwnd from the advertised window minus bytes in flight. */
4046 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4047 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4048 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4049 /* SWS sender side engages */
4050 asoc->peers_rwnd = 0;
4052 if (asoc->peers_rwnd > old_rwnd) {
4053 win_probe_recovery = 1;
4055 /* Now assure a timer where data is queued at */
4058 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4061 if (win_probe_recovery && (net->window_probe)) {
4062 win_probe_recovered = 1;
4064 * Find first chunk that was used with window probe
4065 * and clear the sent
4067 /* sa_ignore FREED_MEMORY */
4068 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4069 if (tp1->window_probe) {
4070 /* move back to data send queue */
4071 sctp_window_probe_recovery(stcb, asoc, tp1);
4076 if (net->RTO == 0) {
4077 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4079 to_ticks = MSEC_TO_TICKS(net->RTO);
4081 if (net->flight_size) {
4083 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4084 sctp_timeout_handler, &net->rxt_timer);
4085 if (net->window_probe) {
4086 net->window_probe = 0;
4089 if (net->window_probe) {
4091 * In window probes we must assure a timer
4092 * is still running there
4094 net->window_probe = 0;
4095 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4096 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4097 sctp_timeout_handler, &net->rxt_timer);
4099 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4100 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4102 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Accounting looked inconsistent: audit and, if confirmed, rebuild
 * the flight counters from the sent_queue states. */
4107 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4108 (asoc->sent_queue_retran_cnt == 0) &&
4109 (win_probe_recovered == 0) &&
4112 * huh, this should not happen unless all packets are
4113 * PR-SCTP and marked to skip of course.
4115 if (sctp_fs_audit(asoc)) {
4116 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4117 net->flight_size = 0;
4119 asoc->total_flight = 0;
4120 asoc->total_flight_count = 0;
4121 asoc->sent_queue_retran_cnt = 0;
4122 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4123 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4124 sctp_flight_size_increase(tp1);
4125 sctp_total_flight_increase(stcb, tp1);
4126 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4127 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4134 /**********************************/
4135 /* Now what about shutdown issues */
4136 /**********************************/
4137 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4138 /* nothing left on sendqueue.. consider done */
4140 if ((asoc->stream_queue_cnt == 1) &&
4141 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4142 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4143 (asoc->locked_on_sending)
4145 struct sctp_stream_queue_pending *sp;
4148 * I may be in a state where we got all across.. but
4149 * cannot write more due to a shutdown... we abort
4150 * since the user did not indicate EOR in this case.
4151 * The sp will be cleaned during free of the asoc.
4153 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4155 if ((sp) && (sp->length == 0)) {
4156 /* Let cleanup code purge it */
4157 if (sp->msg_is_complete) {
4158 asoc->stream_queue_cnt--;
4160 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4161 asoc->locked_on_sending = NULL;
4162 asoc->stream_queue_cnt--;
4166 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4167 (asoc->stream_queue_cnt == 0)) {
4168 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4169 /* Need to abort here */
4170 struct mbuf *op_err;
4175 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4176 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4177 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4180 struct sctp_nets *netp;
4182 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4183 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4184 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* Everything sent and acked: move to SHUTDOWN-SENT and emit SHUTDOWN. */
4186 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4187 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4188 sctp_stop_timers_for_shutdown(stcb);
4189 if (asoc->alternate) {
4190 netp = asoc->alternate;
4192 netp = asoc->primary_destination;
4194 sctp_send_shutdown(stcb, netp);
4195 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4196 stcb->sctp_ep, stcb, netp);
4197 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4198 stcb->sctp_ep, stcb, netp);
4200 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4201 (asoc->stream_queue_cnt == 0)) {
4202 struct sctp_nets *netp;
4204 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
/* Peer initiated shutdown and we are drained: answer SHUTDOWN-ACK. */
4207 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4208 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4209 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4210 sctp_stop_timers_for_shutdown(stcb);
4211 if (asoc->alternate) {
4212 netp = asoc->alternate;
4214 netp = asoc->primary_destination;
4216 sctp_send_shutdown_ack(stcb, netp);
4217 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4218 stcb->sctp_ep, stcb, netp);
4221 /*********************************************/
4222 /* Here we perform PR-SCTP procedures */
4224 /*********************************************/
4225 /* C1. update advancedPeerAckPoint */
4226 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4227 asoc->advanced_peer_ack_point = cumack;
4229 /* PR-Sctp issues need to be addressed too */
4230 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4231 struct sctp_tmit_chunk *lchk;
4232 uint32_t old_adv_peer_ack_point;
4234 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4235 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4236 /* C3. See if we need to send a Fwd-TSN */
4237 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4239 * ISSUE with ECN, see FWD-TSN processing.
4241 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4242 send_forward_tsn(stcb, asoc);
4244 /* try to FR fwd-tsn's that get lost too */
4245 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4246 send_forward_tsn(stcb, asoc);
4251 /* Assure a timer is up */
4252 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4253 stcb->sctp_ep, stcb, lchk->whoTo);
4256 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4257 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4259 stcb->asoc.peers_rwnd,
4260 stcb->asoc.total_flight,
4261 stcb->asoc.total_output_queue_size);
4266 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4267 struct sctp_tcb *stcb,
4268 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4269 int *abort_now, uint8_t flags,
4270 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4272 struct sctp_association *asoc;
4273 struct sctp_tmit_chunk *tp1, *tp2;
4274 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4275 uint16_t wake_him = 0;
4276 uint32_t send_s = 0;
4278 int accum_moved = 0;
4279 int will_exit_fast_recovery = 0;
4280 uint32_t a_rwnd, old_rwnd;
4281 int win_probe_recovery = 0;
4282 int win_probe_recovered = 0;
4283 struct sctp_nets *net = NULL;
4286 uint8_t reneged_all = 0;
4287 uint8_t cmt_dac_flag;
4290 * we take any chance we can to service our queues since we cannot
4291 * get awoken when the socket is read from :<
4294 * Now perform the actual SACK handling: 1) Verify that it is not an
4295 * old sack, if so discard. 2) If there is nothing left in the send
4296 * queue (cum-ack is equal to last acked) then you have a duplicate
4297 * too, update any rwnd change and verify no timers are running.
4298 * then return. 3) Process any new consequtive data i.e. cum-ack
4299 * moved process these first and note that it moved. 4) Process any
4300 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4301 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4302 * sync up flightsizes and things, stop all timers and also check
4303 * for shutdown_pending state. If so then go ahead and send off the
4304 * shutdown. If in shutdown recv, send off the shutdown-ack and
4305 * start that timer, Ret. 9) Strike any non-acked things and do FR
4306 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4307 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4308 * if in shutdown_recv state.
4310 SCTP_TCB_LOCK_ASSERT(stcb);
4312 this_sack_lowest_newack = 0;
4313 SCTP_STAT_INCR(sctps_slowpath_sack);
4315 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4316 #ifdef SCTP_ASOCLOG_OF_TSNS
4317 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4318 stcb->asoc.cumack_log_at++;
4319 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4320 stcb->asoc.cumack_log_at = 0;
4325 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4326 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4327 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4329 old_rwnd = stcb->asoc.peers_rwnd;
4330 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4331 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4332 stcb->asoc.overall_error_count,
4334 SCTP_FROM_SCTP_INDATA,
4337 stcb->asoc.overall_error_count = 0;
4339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4340 sctp_log_sack(asoc->last_acked_seq,
4347 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4349 uint32_t *dupdata, dblock;
4351 for (i = 0; i < num_dup; i++) {
4352 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4353 sizeof(uint32_t), (uint8_t *) & dblock);
4354 if (dupdata == NULL) {
4357 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4360 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4362 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4363 tp1 = TAILQ_LAST(&asoc->sent_queue,
4364 sctpchunk_listhead);
4365 send_s = tp1->rec.data.TSN_seq + 1;
4368 send_s = asoc->sending_seq;
4370 if (SCTP_TSN_GE(cum_ack, send_s)) {
4371 struct mbuf *op_err;
4372 char msg[SCTP_DIAG_INFO_LEN];
4375 * no way, we have not even sent this TSN out yet.
4376 * Peer is hopelessly messed up with us.
4378 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4381 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4382 tp1->rec.data.TSN_seq, (void *)tp1);
4387 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4389 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4390 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4391 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4395 /**********************/
4396 /* 1) check the range */
4397 /**********************/
4398 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4399 /* acking something behind */
4402 /* update the Rwnd of the peer */
4403 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4404 TAILQ_EMPTY(&asoc->send_queue) &&
4405 (asoc->stream_queue_cnt == 0)) {
4406 /* nothing left on send/sent and strmq */
4407 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4408 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4409 asoc->peers_rwnd, 0, 0, a_rwnd);
4411 asoc->peers_rwnd = a_rwnd;
4412 if (asoc->sent_queue_retran_cnt) {
4413 asoc->sent_queue_retran_cnt = 0;
4415 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4416 /* SWS sender side engages */
4417 asoc->peers_rwnd = 0;
4419 /* stop any timers */
4420 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4421 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4422 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4423 net->partial_bytes_acked = 0;
4424 net->flight_size = 0;
4426 asoc->total_flight = 0;
4427 asoc->total_flight_count = 0;
4431 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4432 * things. The total byte count acked is tracked in netAckSz AND
4433 * netAck2 is used to track the total bytes acked that are un-
4434 * amibguious and were never retransmitted. We track these on a per
4435 * destination address basis.
4437 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4438 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4439 /* Drag along the window_tsn for cwr's */
4440 net->cwr_window_tsn = cum_ack;
4442 net->prev_cwnd = net->cwnd;
4447 * CMT: Reset CUC and Fast recovery algo variables before
4450 net->new_pseudo_cumack = 0;
4451 net->will_exit_fast_recovery = 0;
4452 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4453 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4456 /* process the new consecutive TSN first */
4457 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4458 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4459 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4461 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4463 * If it is less than ACKED, it is
4464 * now no-longer in flight. Higher
4465 * values may occur during marking
4467 if ((tp1->whoTo->dest_state &
4468 SCTP_ADDR_UNCONFIRMED) &&
4469 (tp1->snd_count < 2)) {
4471 * If there was no retran
4472 * and the address is
4473 * un-confirmed and we sent
4475 * sacked.. its confirmed,
4478 tp1->whoTo->dest_state &=
4479 ~SCTP_ADDR_UNCONFIRMED;
4481 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4482 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4483 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4484 tp1->whoTo->flight_size,
4486 (uint32_t) (uintptr_t) tp1->whoTo,
4487 tp1->rec.data.TSN_seq);
4489 sctp_flight_size_decrease(tp1);
4490 sctp_total_flight_decrease(stcb, tp1);
4491 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4492 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4496 tp1->whoTo->net_ack += tp1->send_size;
4498 /* CMT SFR and DAC algos */
4499 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4500 tp1->whoTo->saw_newack = 1;
4502 if (tp1->snd_count < 2) {
4504 * True non-retransmited
4507 tp1->whoTo->net_ack2 +=
4510 /* update RTO too? */
4514 sctp_calculate_rto(stcb,
4516 &tp1->sent_rcv_time,
4517 sctp_align_safe_nocopy,
4518 SCTP_RTT_FROM_DATA);
4521 if (tp1->whoTo->rto_needed == 0) {
4522 tp1->whoTo->rto_needed = 1;
4528 * CMT: CUCv2 algorithm. From the
4529 * cumack'd TSNs, for each TSN being
4530 * acked for the first time, set the
4531 * following variables for the
4532 * corresp destination.
4533 * new_pseudo_cumack will trigger a
4535 * find_(rtx_)pseudo_cumack will
4536 * trigger search for the next
4537 * expected (rtx-)pseudo-cumack.
4539 tp1->whoTo->new_pseudo_cumack = 1;
4540 tp1->whoTo->find_pseudo_cumack = 1;
4541 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4544 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4545 sctp_log_sack(asoc->last_acked_seq,
4547 tp1->rec.data.TSN_seq,
4550 SCTP_LOG_TSN_ACKED);
4552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4553 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4556 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4557 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4558 #ifdef SCTP_AUDITING_ENABLED
4559 sctp_audit_log(0xB3,
4560 (asoc->sent_queue_retran_cnt & 0x000000ff));
4563 if (tp1->rec.data.chunk_was_revoked) {
4564 /* deflate the cwnd */
4565 tp1->whoTo->cwnd -= tp1->book_size;
4566 tp1->rec.data.chunk_was_revoked = 0;
4568 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4569 tp1->sent = SCTP_DATAGRAM_ACKED;
4576 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4577 /* always set this up to cum-ack */
4578 asoc->this_sack_highest_gap = last_tsn;
4580 if ((num_seg > 0) || (num_nr_seg > 0)) {
4583 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4584 * to be greater than the cumack. Also reset saw_newack to 0
4587 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4588 net->saw_newack = 0;
4589 net->this_sack_highest_newack = last_tsn;
4593 * thisSackHighestGap will increase while handling NEW
4594 * segments this_sack_highest_newack will increase while
4595 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4596 * used for CMT DAC algo. saw_newack will also change.
4598 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4599 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4600 num_seg, num_nr_seg, &rto_ok)) {
4603 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4605 * validate the biggest_tsn_acked in the gap acks if
4606 * strict adherence is wanted.
4608 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4610 * peer is either confused or we are under
4611 * attack. We must abort.
4613 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4614 biggest_tsn_acked, send_s);
4619 /*******************************************/
4620 /* cancel ALL T3-send timer if accum moved */
4621 /*******************************************/
4622 if (asoc->sctp_cmt_on_off > 0) {
4623 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4624 if (net->new_pseudo_cumack)
4625 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4627 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4632 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4633 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4634 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4638 /********************************************/
4639 /* drop the acked chunks from the sentqueue */
4640 /********************************************/
4641 asoc->last_acked_seq = cum_ack;
4643 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4644 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4647 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4648 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4649 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4652 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4656 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4657 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4658 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4659 asoc->trigger_reset = 1;
4661 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4662 if (PR_SCTP_ENABLED(tp1->flags)) {
4663 if (asoc->pr_sctp_cnt != 0)
4664 asoc->pr_sctp_cnt--;
4666 asoc->sent_queue_cnt--;
4668 /* sa_ignore NO_NULL_CHK */
4669 sctp_free_bufspace(stcb, asoc, tp1, 1);
4670 sctp_m_freem(tp1->data);
4672 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4673 asoc->sent_queue_cnt_removeable--;
4676 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4677 sctp_log_sack(asoc->last_acked_seq,
4679 tp1->rec.data.TSN_seq,
4682 SCTP_LOG_FREE_SENT);
4684 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4687 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4689 panic("Warning flight size is postive and should be 0");
4691 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4692 asoc->total_flight);
4694 asoc->total_flight = 0;
4696 /* sa_ignore NO_NULL_CHK */
4697 if ((wake_him) && (stcb->sctp_socket)) {
4698 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4702 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4703 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4704 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4706 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4707 so = SCTP_INP_SO(stcb->sctp_ep);
4708 atomic_add_int(&stcb->asoc.refcnt, 1);
4709 SCTP_TCB_UNLOCK(stcb);
4710 SCTP_SOCKET_LOCK(so, 1);
4711 SCTP_TCB_LOCK(stcb);
4712 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4713 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4714 /* assoc was freed while we were unlocked */
4715 SCTP_SOCKET_UNLOCK(so, 1);
4719 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4721 SCTP_SOCKET_UNLOCK(so, 1);
4724 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4725 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4729 if (asoc->fast_retran_loss_recovery && accum_moved) {
4730 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4731 /* Setup so we will exit RFC2582 fast recovery */
4732 will_exit_fast_recovery = 1;
4736 * Check for revoked fragments:
4738 * if Previous sack - Had no frags then we can't have any revoked if
4739 * Previous sack - Had frag's then - If we now have frags aka
4740 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4741 * some of them. else - The peer revoked all ACKED fragments, since
4742 * we had some before and now we have NONE.
4746 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4747 asoc->saw_sack_with_frags = 1;
4748 } else if (asoc->saw_sack_with_frags) {
4749 int cnt_revoked = 0;
4751 /* Peer revoked all dg's marked or acked */
4752 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4753 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4754 tp1->sent = SCTP_DATAGRAM_SENT;
4755 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4756 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4757 tp1->whoTo->flight_size,
4759 (uint32_t) (uintptr_t) tp1->whoTo,
4760 tp1->rec.data.TSN_seq);
4762 sctp_flight_size_increase(tp1);
4763 sctp_total_flight_increase(stcb, tp1);
4764 tp1->rec.data.chunk_was_revoked = 1;
4766 * To ensure that this increase in
4767 * flightsize, which is artificial, does not
4768 * throttle the sender, we also increase the
4769 * cwnd artificially.
4771 tp1->whoTo->cwnd += tp1->book_size;
4778 asoc->saw_sack_with_frags = 0;
4781 asoc->saw_sack_with_nr_frags = 1;
4783 asoc->saw_sack_with_nr_frags = 0;
4785 /* JRS - Use the congestion control given in the CC module */
4786 if (ecne_seen == 0) {
4787 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4788 if (net->net_ack2 > 0) {
4790 * Karn's rule applies to clearing error
4791 * count, this is optional.
4793 net->error_count = 0;
4794 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4795 /* addr came good */
4796 net->dest_state |= SCTP_ADDR_REACHABLE;
4797 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4798 0, (void *)net, SCTP_SO_NOT_LOCKED);
4800 if (net == stcb->asoc.primary_destination) {
4801 if (stcb->asoc.alternate) {
4803 * release the alternate,
4806 sctp_free_remote_addr(stcb->asoc.alternate);
4807 stcb->asoc.alternate = NULL;
4810 if (net->dest_state & SCTP_ADDR_PF) {
4811 net->dest_state &= ~SCTP_ADDR_PF;
4812 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4813 stcb->sctp_ep, stcb, net,
4814 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4815 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4816 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4817 /* Done with this net */
4820 /* restore any doubled timers */
4821 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4822 if (net->RTO < stcb->asoc.minrto) {
4823 net->RTO = stcb->asoc.minrto;
4825 if (net->RTO > stcb->asoc.maxrto) {
4826 net->RTO = stcb->asoc.maxrto;
4830 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4832 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4833 /* nothing left in-flight */
4834 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4835 /* stop all timers */
4836 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4838 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4839 net->flight_size = 0;
4840 net->partial_bytes_acked = 0;
4842 asoc->total_flight = 0;
4843 asoc->total_flight_count = 0;
4845 /**********************************/
4846 /* Now what about shutdown issues */
4847 /**********************************/
4848 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4849 /* nothing left on sendqueue.. consider done */
4850 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4851 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4852 asoc->peers_rwnd, 0, 0, a_rwnd);
4854 asoc->peers_rwnd = a_rwnd;
4855 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4856 /* SWS sender side engages */
4857 asoc->peers_rwnd = 0;
4860 if ((asoc->stream_queue_cnt == 1) &&
4861 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4862 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4863 (asoc->locked_on_sending)
4865 struct sctp_stream_queue_pending *sp;
4868 * I may be in a state where we got all across.. but
4869 * cannot write more due to a shutdown... we abort
4870 * since the user did not indicate EOR in this case.
4872 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4874 if ((sp) && (sp->length == 0)) {
4875 asoc->locked_on_sending = NULL;
4876 if (sp->msg_is_complete) {
4877 asoc->stream_queue_cnt--;
4879 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4880 asoc->stream_queue_cnt--;
4884 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4885 (asoc->stream_queue_cnt == 0)) {
4886 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4887 /* Need to abort here */
4888 struct mbuf *op_err;
4893 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4894 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4895 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4898 struct sctp_nets *netp;
4900 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4901 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4902 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4904 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4905 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4906 sctp_stop_timers_for_shutdown(stcb);
4907 if (asoc->alternate) {
4908 netp = asoc->alternate;
4910 netp = asoc->primary_destination;
4912 sctp_send_shutdown(stcb, netp);
4913 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4914 stcb->sctp_ep, stcb, netp);
4915 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4916 stcb->sctp_ep, stcb, netp);
4919 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4920 (asoc->stream_queue_cnt == 0)) {
4921 struct sctp_nets *netp;
4923 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4926 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4927 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4928 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4929 sctp_stop_timers_for_shutdown(stcb);
4930 if (asoc->alternate) {
4931 netp = asoc->alternate;
4933 netp = asoc->primary_destination;
4935 sctp_send_shutdown_ack(stcb, netp);
4936 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4937 stcb->sctp_ep, stcb, netp);
4942 * Now here we are going to recycle net_ack for a different use...
4945 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4950 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4951 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4952 * automatically ensure that.
4954 if ((asoc->sctp_cmt_on_off > 0) &&
4955 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4956 (cmt_dac_flag == 0)) {
4957 this_sack_lowest_newack = cum_ack;
4959 if ((num_seg > 0) || (num_nr_seg > 0)) {
4960 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4961 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4963 /* JRS - Use the congestion control given in the CC module */
4964 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4966 /* Now are we exiting loss recovery ? */
4967 if (will_exit_fast_recovery) {
4968 /* Ok, we must exit fast recovery */
4969 asoc->fast_retran_loss_recovery = 0;
4971 if ((asoc->sat_t3_loss_recovery) &&
4972 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4973 /* end satellite t3 loss recovery */
4974 asoc->sat_t3_loss_recovery = 0;
4979 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4980 if (net->will_exit_fast_recovery) {
4981 /* Ok, we must exit fast recovery */
4982 net->fast_retran_loss_recovery = 0;
4986 /* Adjust and set the new rwnd value */
4987 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4988 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4989 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4991 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4992 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4993 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4994 /* SWS sender side engages */
4995 asoc->peers_rwnd = 0;
4997 if (asoc->peers_rwnd > old_rwnd) {
4998 win_probe_recovery = 1;
5001 * Now we must setup so we have a timer up for anyone with
5007 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5008 if (win_probe_recovery && (net->window_probe)) {
5009 win_probe_recovered = 1;
5011 * Find first chunk that was used with
5012 * window probe and clear the event. Put
5013 * it back into the send queue as if has
5016 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5017 if (tp1->window_probe) {
5018 sctp_window_probe_recovery(stcb, asoc, tp1);
5023 if (net->flight_size) {
5025 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5026 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5027 stcb->sctp_ep, stcb, net);
5029 if (net->window_probe) {
5030 net->window_probe = 0;
5033 if (net->window_probe) {
5035 * In window probes we must assure a timer
5036 * is still running there
5038 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5039 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5040 stcb->sctp_ep, stcb, net);
5043 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5044 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5046 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5051 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5052 (asoc->sent_queue_retran_cnt == 0) &&
5053 (win_probe_recovered == 0) &&
5056 * huh, this should not happen unless all packets are
5057 * PR-SCTP and marked to skip of course.
5059 if (sctp_fs_audit(asoc)) {
5060 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5061 net->flight_size = 0;
5063 asoc->total_flight = 0;
5064 asoc->total_flight_count = 0;
5065 asoc->sent_queue_retran_cnt = 0;
5066 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5067 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5068 sctp_flight_size_increase(tp1);
5069 sctp_total_flight_increase(stcb, tp1);
5070 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5071 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5078 /*********************************************/
5079 /* Here we perform PR-SCTP procedures */
5081 /*********************************************/
5082 /* C1. update advancedPeerAckPoint */
5083 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5084 asoc->advanced_peer_ack_point = cum_ack;
5086 /* C2. try to further move advancedPeerAckPoint ahead */
5087 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5088 struct sctp_tmit_chunk *lchk;
5089 uint32_t old_adv_peer_ack_point;
5091 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5092 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5093 /* C3. See if we need to send a Fwd-TSN */
5094 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5096 * ISSUE with ECN, see FWD-TSN processing.
5098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5099 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5100 0xee, cum_ack, asoc->advanced_peer_ack_point,
5101 old_adv_peer_ack_point);
5103 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5104 send_forward_tsn(stcb, asoc);
5106 /* try to FR fwd-tsn's that get lost too */
5107 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5108 send_forward_tsn(stcb, asoc);
5113 /* Assure a timer is up */
5114 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5115 stcb->sctp_ep, stcb, lchk->whoTo);
5118 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5119 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5121 stcb->asoc.peers_rwnd,
5122 stcb->asoc.total_flight,
5123 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a received SHUTDOWN chunk
 * (cp) as if it were a SACK: extract the peer's cum-ack and hand it to
 * the express SACK handler.  *abort_flag is set by the callee if the
 * association must be aborted.
 */
5128 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5131 uint32_t cum_ack, a_rwnd;
5133 cum_ack = ntohl(cp->cumulative_tsn_ack);
/*
 * A SHUTDOWN carries no rwnd field, so synthesize a_rwnd as
 * peers_rwnd + total_flight: after the handler subtracts the
 * flight, the peer's advertised window appears unchanged.
 */
5134 /* Arrange so a_rwnd does NOT change */
5135 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5137 /* Now call the express sack handling */
5138 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * After a FORWARD-TSN has advanced the stream's delivery point, push out
 * any messages on this inbound stream that have become deliverable.
 * Two passes: (1) deliver everything at or before the stream's
 * last_sequence_delivered; (2) deliver any contiguous in-order run that
 * follows it.  Complete (NOT_FRAG) messages go straight to the socket
 * read queue; a fragmented message instead rewinds
 * last_sequence_delivered and triggers a reassembly re-check.
 * NOTE(review): the INP read lock appears to be held by the caller --
 * sctp_add_to_readq is invoked with SCTP_READ_LOCK_HELD; confirm at
 * the call sites.
 */
5142 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5143 struct sctp_stream_in *strmin)
5145 struct sctp_queued_to_read *ctl, *nctl;
5146 struct sctp_association *asoc;
5148 int need_reasm_check = 0, old;
5151 tt = strmin->last_sequence_delivered;
/* 'old' selects 16- vs 32-bit sequence comparison (I-DATA uses 32-bit msg ids) */
5152 if (asoc->idata_supported) {
5158 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with SSN <= tt is now deliverable */
5161 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5162 if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5163 /* this is deliverable now */
5164 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* unlink from whichever per-stream queue it sits on */
5165 if (ctl->on_strm_q) {
5166 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5167 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5168 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5169 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5171 panic("strmin:%p ctl:%p unknown %d",
5172 strmin, ctl, ctl->on_strm_q);
5176 /* subtract pending on streams */
5177 asoc->size_on_all_streams -= ctl->length;
5178 sctp_ucount_decr(asoc->cnt_on_all_streams);
5179 /* deliver it to at least the delivery-q */
5180 if (stcb->sctp_socket) {
5181 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5182 sctp_add_to_readq(stcb->sctp_ep, stcb,
5184 &stcb->sctp_socket->so_rcv,
5185 1, SCTP_READ_LOCK_HELD,
5186 SCTP_SO_NOT_LOCKED);
5189 /* Its a fragmented message */
5190 if (ctl->first_frag_seen) {
5192 * Make it so this is next to
5193 * deliver, we restore later
/* rewind so the reassembly check below targets this message */
5195 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5196 need_reasm_check = 1;
5201 /* no more delivery now. */
5205 if (need_reasm_check) {
/* try to complete/deliver the fragmented message found above */
5208 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5209 if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5210 /* Restore the next to deliver unless we are ahead */
5211 strmin->last_sequence_delivered = tt;
5214 /* Left the front Partial one on */
5217 need_reasm_check = 0;
5220 * now we must deliver things in queue the normal way if any are
/* Pass 2: walk the in-order run starting just past the delivery point */
5223 tt = strmin->last_sequence_delivered + 1;
5224 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5225 if (tt == ctl->sinfo_ssn) {
5226 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5227 /* this is deliverable now */
5228 if (ctl->on_strm_q) {
5229 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5230 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5231 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5232 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5234 panic("strmin:%p ctl:%p unknown %d",
5235 strmin, ctl, ctl->on_strm_q);
5239 /* subtract pending on streams */
5240 asoc->size_on_all_streams -= ctl->length;
5241 sctp_ucount_decr(asoc->cnt_on_all_streams);
5242 /* deliver it to at least the delivery-q */
5243 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5244 if (stcb->sctp_socket) {
5245 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5246 sctp_add_to_readq(stcb->sctp_ep, stcb,
5248 &stcb->sctp_socket->so_rcv, 1,
5249 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* advance the expected SSN and keep walking the run */
5252 tt = strmin->last_sequence_delivered + 1;
5254 /* Its a fragmented message */
5255 if (ctl->first_frag_seen) {
5257 * Make it so this is next to
5260 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5261 need_reasm_check = 1;
5269 if (need_reasm_check) {
5270 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
/*
 * Discard the (possibly partial) message identified by (stream, seq)
 * from the stream's reassembly queue.  Every chunk hanging on the
 * entry is unlinked and freed, and the per-assoc reassembly accounting
 * is adjusted.  The read-queue entry itself is freed only when it is
 * not also linked on the socket read queue (on_read_q == 0); otherwise
 * the read path retains ownership.
 */
5275 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5276 struct sctp_association *asoc,
5277 uint16_t stream, uint32_t seq)
5279 struct sctp_queued_to_read *control;
5280 struct sctp_stream_in *strm;
5281 struct sctp_tmit_chunk *chk, *nchk;
5284 * For now large messages held on the stream reasm that are complete
5285 * will be tossed too. We could in theory do more work to spin
5286 * through and stop after dumping one msg aka seeing the start of a
5287 * new msg at the head, and call the delivery function... to see if
5288 * it can be delivered... But for now we just dump everything on the
5291 strm = &asoc->strmin[stream];
5292 control = find_reasm_entry(strm, (uint32_t) seq, 0, 0);
5293 if (control == NULL) {
/* free every chunk still attached to this reassembly entry */
5297 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5298 /* Purge hanging chunks */
5299 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5300 asoc->size_on_reasm_queue -= chk->send_size;
5301 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5303 sctp_m_freem(chk->data);
5306 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5308 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
/* only reclaim the entry when the read queue does not still reference it */
5309 if (control->on_read_q == 0) {
5310 sctp_free_remote_addr(control->whoFrom);
5311 if (control->data) {
5312 sctp_m_freem(control->data);
5313 control->data = NULL;
5315 sctp_free_a_readq(stcb, control);
5321 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5322 struct sctp_forward_tsn_chunk *fwd,
5323 int *abort_flag, struct mbuf *m, int offset)
5325 /* The pr-sctp fwd tsn */
5327 * here we will perform all the data receiver side steps for
5328 * processing FwdTSN, as required in by pr-sctp draft:
5330 * Assume we get FwdTSN(x):
5332 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5333 * others we have 3) examine and update re-ordering queue on
5334 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5335 * report where we are.
5337 struct sctp_association *asoc;
5338 uint32_t new_cum_tsn, gap;
5339 unsigned int i, fwd_sz, m_size;
5341 struct sctp_stream_in *strm;
5342 struct sctp_queued_to_read *ctl, *sv;
5345 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5346 SCTPDBG(SCTP_DEBUG_INDATA1,
5347 "Bad size too small/big fwd-tsn\n");
5350 m_size = (stcb->asoc.mapping_array_size << 3);
5351 /*************************************************************/
5352 /* 1. Here we update local cumTSN and shift the bitmap array */
5353 /*************************************************************/
5354 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5356 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5357 /* Already got there ... */
5361 * now we know the new TSN is more advanced, let's find the actual
5364 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5365 asoc->cumulative_tsn = new_cum_tsn;
5366 if (gap >= m_size) {
5367 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5368 struct mbuf *op_err;
5369 char msg[SCTP_DIAG_INFO_LEN];
5372 * out of range (of single byte chunks in the rwnd I
5373 * give out). This must be an attacker.
5376 snprintf(msg, sizeof(msg),
5377 "New cum ack %8.8x too high, highest TSN %8.8x",
5378 new_cum_tsn, asoc->highest_tsn_inside_map);
5379 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5380 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5381 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5384 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5386 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5387 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5388 asoc->highest_tsn_inside_map = new_cum_tsn;
5390 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5391 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5394 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5397 SCTP_TCB_LOCK_ASSERT(stcb);
5398 for (i = 0; i <= gap; i++) {
5399 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5400 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5401 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5402 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5403 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5408 /*************************************************************/
5409 /* 2. Clear up re-assembly queue */
5410 /*************************************************************/
5412 /* This is now done as part of clearing up the stream/seq */
5414 /*******************************************************/
5415 /* 3. Update the PR-stream re-ordering queues and fix */
5416 /* delivery issues as needed. */
5417 /*******************************************************/
5418 fwd_sz -= sizeof(*fwd);
5421 unsigned int num_str;
5425 struct sctp_strseq *stseq, strseqbuf;
5426 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5428 offset += sizeof(*fwd);
5430 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5431 if (asoc->idata_supported) {
5432 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5435 num_str = fwd_sz / sizeof(struct sctp_strseq);
5438 for (i = 0; i < num_str; i++) {
5439 if (asoc->idata_supported) {
5440 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5441 sizeof(struct sctp_strseq_mid),
5442 (uint8_t *) & strseqbuf_m);
5443 offset += sizeof(struct sctp_strseq_mid);
5444 if (stseq_m == NULL) {
5447 stream = ntohs(stseq_m->stream);
5448 sequence = ntohl(stseq_m->msg_id);
5450 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5451 sizeof(struct sctp_strseq),
5452 (uint8_t *) & strseqbuf);
5453 offset += sizeof(struct sctp_strseq);
5454 if (stseq == NULL) {
5457 stream = ntohs(stseq->stream);
5458 sequence = (uint32_t) ntohs(stseq->sequence);
5465 * Ok we now look for the stream/seq on the read
5466 * queue where its not all delivered. If we find it
5467 * we transmute the read entry into a PDI_ABORTED.
5469 if (stream >= asoc->streamincnt) {
5470 /* screwed up streams, stop! */
5473 if ((asoc->str_of_pdapi == stream) &&
5474 (asoc->ssn_of_pdapi == sequence)) {
5476 * If this is the one we were partially
5477 * delivering now then we no longer are.
5478 * Note this will change with the reassembly
5481 asoc->fragmented_delivery_inprogress = 0;
5483 strm = &asoc->strmin[stream];
5484 sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence);
5485 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5486 if ((ctl->sinfo_stream == stream) &&
5487 (ctl->sinfo_ssn == sequence)) {
5488 str_seq = (stream << 16) | (0x0000ffff & sequence);
5489 ctl->pdapi_aborted = 1;
5490 sv = stcb->asoc.control_pdapi;
5492 if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5493 TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5494 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5495 TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5496 } else if (ctl->on_strm_q) {
5497 panic("strm:%p ctl:%p unknown %d",
5498 strm, ctl, ctl->on_strm_q);
5501 stcb->asoc.control_pdapi = ctl;
5502 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5504 SCTP_PARTIAL_DELIVERY_ABORTED,
5506 SCTP_SO_NOT_LOCKED);
5507 stcb->asoc.control_pdapi = sv;
5509 } else if ((ctl->sinfo_stream == stream) &&
5510 SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5511 /* We are past our victim SSN */
5515 if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5516 /* Update the sequence number */
5517 strm->last_sequence_delivered = sequence;
5519 /* now kick the stream the new way */
5520 /* sa_ignore NO_NULL_CHK */
5521 sctp_kick_prsctp_reorder_queue(stcb, strm);
5523 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5526 * Now slide thing forward.
5528 sctp_slide_mapping_arrays(stcb);