/*-
 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipsec.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/route.h>

#include <sys/limits.h>
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

#include <netinet6/ipsec.h>
#include <netkey/key.h>
extern uint32_t sctp_debug_on;

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it ... for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

extern int sctp_strict_sacks;
107 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
109 uint32_t calc, calc_w_oh;
/*
 * This is really set wrong with respect to a 1-2-m socket, since
 * sb_cc is the count that everyone has put up. When we rewrite
 * sctp_soreceive() we will fix this so that ONLY this
 * association's data is taken into account.
 */
117 if (stcb->sctp_socket == NULL)
120 if (stcb->asoc.sb_cc == 0 &&
121 asoc->size_on_reasm_queue == 0 &&
122 asoc->size_on_all_streams == 0) {
123 /* Full rwnd granted */
124 asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat,
128 /* get actual space */
129 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
132 * take out what has NOT been put on socket queue and we yet hold
135 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
136 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
143 /* what is the overhead of all these rwnd's */
144 calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
145 asoc->my_rwnd = calc;
146 if (calc_w_oh == 0) {
148 * If our overhead is greater than the advertised rwnd, we
149 * clamp the rwnd to 1. This lets us still accept inbound
150 * segments, but hopefully will shut the sender down when he
151 * finally gets the message.
157 (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
158 /* SWS engaged, tell peer none left */
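/*
 * Illustrative sketch, not compiled: the receive window computed above is
 * essentially "socket-buffer space, minus data still held on the reassembly
 * and stream queues, clamped for control overhead and silly-window
 * avoidance". The helper below restates that shape with plain integers;
 * the example_* name and parameters are hypothetical, and the saturating
 * subtraction stands in for sctp_sbspace_sub().
 */
#if 0
static uint32_t
example_rwnd(uint32_t sb_space, uint32_t on_reasm_queue,
    uint32_t on_all_streams, uint32_t control_overhead, uint32_t sws_threshold)
{
	uint32_t calc, calc_w_oh;

#define	EX_SAT_SUB(a, b) (((a) > (b)) ? ((a) - (b)) : 0)
	/* space left once queued-but-undelivered data is accounted for */
	calc = EX_SAT_SUB(sb_space, on_reasm_queue);
	calc = EX_SAT_SUB(calc, on_all_streams);
	if (calc == 0)
		return (0);
	/* if control overhead eats everything, advertise a token 1 byte */
	calc_w_oh = EX_SAT_SUB(calc, control_overhead);
	if (calc_w_oh == 0)
		return (1);
	/* silly-window avoidance: below the receiver threshold, advertise 1 */
	if (calc < sws_threshold)
		return (1);
	return (calc);
#undef EX_SAT_SUB
}
#endif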
164 /* Calculate what the rwnd would be */
167 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
169 uint32_t calc = 0, calc_w_oh;
/*
 * This is really set wrong with respect to a 1-2-m socket, since
 * sb_cc is the count that everyone has put up. When we rewrite
 * sctp_soreceive() we will fix this so that ONLY this
 * association's data is taken into account.
 */
177 if (stcb->sctp_socket == NULL)
180 if (stcb->asoc.sb_cc == 0 &&
181 asoc->size_on_reasm_queue == 0 &&
182 asoc->size_on_all_streams == 0) {
183 /* Full rwnd granted */
184 calc = max(stcb->sctp_socket->so_rcv.sb_hiwat,
188 /* get actual space */
189 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
192 * take out what has NOT been put on socket queue and we yet hold
195 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
196 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
202 /* what is the overhead of all these rwnd's */
203 calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
204 if (calc_w_oh == 0) {
206 * If our overhead is greater than the advertised rwnd, we
207 * clamp the rwnd to 1. This lets us still accept inbound
208 * segments, but hopefully will shut the sender down when he
209 * finally gets the message.
215 (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
216 /* SWS engaged, tell peer none left */
226 * Build out our readq entry based on the incoming packet.
228 struct sctp_queued_to_read *
229 sctp_build_readq_entry(struct sctp_tcb *stcb,
230 struct sctp_nets *net,
231 uint32_t tsn, uint32_t ppid,
232 uint32_t context, uint16_t stream_no,
233 uint16_t stream_seq, uint8_t flags,
236 struct sctp_queued_to_read *read_queue_e = NULL;
238 sctp_alloc_a_readq(stcb, read_queue_e);
239 if (read_queue_e == NULL) {
242 read_queue_e->sinfo_stream = stream_no;
243 read_queue_e->sinfo_ssn = stream_seq;
244 read_queue_e->sinfo_flags = (flags << 8);
245 read_queue_e->sinfo_ppid = ppid;
246 read_queue_e->sinfo_context = stcb->asoc.context;
247 read_queue_e->sinfo_timetolive = 0;
248 read_queue_e->sinfo_tsn = tsn;
249 read_queue_e->sinfo_cumtsn = tsn;
250 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
251 read_queue_e->whoFrom = net;
252 read_queue_e->length = 0;
253 atomic_add_int(&net->ref_count, 1);
254 read_queue_e->data = dm;
255 read_queue_e->tail_mbuf = NULL;
256 read_queue_e->stcb = stcb;
257 read_queue_e->port_from = stcb->rport;
258 read_queue_e->do_not_ref_stcb = 0;
259 read_queue_e->end_added = 0;
260 read_queue_e->pdapi_aborted = 0;
262 return (read_queue_e);
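/*
 * Illustrative sketch, not compiled: sctp_build_readq_entry() above packs
 * the DATA chunk's receive flags into the upper byte of sinfo_flags (the
 * "flags << 8"), leaving the lower byte for notification flags. The
 * example_* name is hypothetical; 0x04 is the DATA chunk's unordered (U)
 * bit.
 */
#if 0
static uint16_t
example_pack_sinfo_flags(uint8_t chunk_rcv_flags)
{
	/* e.g. an unordered chunk (U bit, 0x04) yields sinfo_flags 0x0400 */
	return ((uint16_t)(chunk_rcv_flags) << 8);
}
#endif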
267 * Build out our readq entry based on the incoming packet.
269 static struct sctp_queued_to_read *
270 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
271 struct sctp_tmit_chunk *chk)
273 struct sctp_queued_to_read *read_queue_e = NULL;
275 sctp_alloc_a_readq(stcb, read_queue_e);
276 if (read_queue_e == NULL) {
279 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
280 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
281 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
282 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
283 read_queue_e->sinfo_context = stcb->asoc.context;
284 read_queue_e->sinfo_timetolive = 0;
285 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
286 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
287 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
288 read_queue_e->whoFrom = chk->whoTo;
289 read_queue_e->length = 0;
290 atomic_add_int(&chk->whoTo->ref_count, 1);
291 read_queue_e->data = chk->data;
292 read_queue_e->tail_mbuf = NULL;
293 read_queue_e->stcb = stcb;
294 read_queue_e->port_from = stcb->rport;
295 read_queue_e->do_not_ref_stcb = 0;
296 read_queue_e->end_added = 0;
297 read_queue_e->pdapi_aborted = 0;
299 return (read_queue_e);
304 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
305 struct sctp_sndrcvinfo *sinfo)
307 struct sctp_sndrcvinfo *outinfo;
311 int use_extended = 0;
313 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
314 /* user does not want the sndrcv ctl */
317 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
319 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
321 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
325 ret = sctp_get_mbuf_for_msg(len,
326 1, M_DONTWAIT, 1, MT_DATA);
332 /* We need a CMSG header followed by the struct */
333 cmh = mtod(ret, struct cmsghdr *);
334 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
335 cmh->cmsg_level = IPPROTO_SCTP;
337 cmh->cmsg_type = SCTP_EXTRCV;
339 memcpy(outinfo, sinfo, len);
341 cmh->cmsg_type = SCTP_SNDRCV;
345 ret->m_len = cmh->cmsg_len;
346 ret->m_pkthdr.len = ret->m_len;
/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
356 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
358 struct sctp_tmit_chunk *chk;
struct sctp_queued_to_read *control, *ctl, *ctlat;

cntDel = stream_no = 0;
368 if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
369 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
371 /* socket above is long gone */
372 asoc->fragmented_delivery_inprogress = 0;
373 chk = TAILQ_FIRST(&asoc->reasmqueue);
375 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
376 asoc->size_on_reasm_queue -= chk->send_size;
377 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
/*
 * Lose the data pointer, since it's in the socket
 * buffer
 */
383 sctp_m_freem(chk->data);
386 /* Now free the address and data */
387 sctp_free_remote_addr(chk->whoTo);
388 sctp_free_a_chunk(stcb, chk);
389 chk = TAILQ_FIRST(&asoc->reasmqueue);
393 SCTP_TCB_LOCK_ASSERT(stcb);
395 chk = TAILQ_FIRST(&asoc->reasmqueue);
399 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
400 /* Can't deliver more :< */
403 stream_no = chk->rec.data.stream_number;
404 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
405 if (nxt_todel != chk->rec.data.stream_seq &&
406 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
408 * Not the next sequence to deliver in its stream OR
413 if ((chk->data->m_flags & M_PKTHDR) == 0) {
414 m = sctp_get_mbuf_for_msg(1,
415 1, M_DONTWAIT, 1, MT_DATA);
420 m->m_pkthdr.len = chk->send_size;
422 m->m_next = chk->data;
425 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
426 if (chk->data->m_next == NULL) {
427 /* hopefully we hit here most of the time */
428 chk->data->m_flags |= M_EOR;
431 * Add the flag to the LAST mbuf in the
435 while (m->m_next != NULL) {
441 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
443 control = sctp_build_readq_entry_chk(stcb, chk);
444 if (control == NULL) {
448 /* save it off for our future deliveries */
449 stcb->asoc.control_pdapi = control;
450 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
454 sctp_add_to_readq(stcb->sctp_ep,
455 stcb, control, &stcb->sctp_socket->so_rcv, end);
458 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
462 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
463 stcb->asoc.control_pdapi,
464 chk->data, end, chk->rec.data.TSN_seq,
465 &stcb->sctp_socket->so_rcv)) {
/*
 * something is very wrong: either
 * control_pdapi is NULL, the tail_mbuf
 * is corrupt, or there is an EOM already on
 * the mbuf chain.
 */
472 if (stcb->asoc.control_pdapi == NULL) {
473 panic("This should not happen control_pdapi NULL?");
475 if (stcb->asoc.control_pdapi->tail_mbuf == NULL) {
476 panic("This should not happen, tail_mbuf not being maintained?");
/* if we did not panic, it was an EOM */
479 panic("Bad chunking ??");
483 /* pull it we did it */
484 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
485 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
486 asoc->fragmented_delivery_inprogress = 0;
487 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
488 asoc->strmin[stream_no].last_sequence_delivered++;
490 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
491 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
493 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
495 * turn the flag back on since we just delivered
498 asoc->fragmented_delivery_inprogress = 1;
500 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
501 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
502 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
503 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
505 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
506 asoc->size_on_reasm_queue -= chk->send_size;
507 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
508 /* free up the chk */
510 sctp_free_remote_addr(chk->whoTo);
511 sctp_free_a_chunk(stcb, chk);
513 if (asoc->fragmented_delivery_inprogress == 0) {
515 * Now lets see if we can deliver the next one on
519 struct sctp_stream_in *strm;
521 strm = &asoc->strmin[stream_no];
522 nxt_todel = strm->last_sequence_delivered + 1;
523 ctl = TAILQ_FIRST(&strm->inqueue);
524 if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
525 while (ctl != NULL) {
526 /* Deliver more if we can. */
527 if (nxt_todel == ctl->sinfo_ssn) {
528 ctlat = TAILQ_NEXT(ctl, next);
529 TAILQ_REMOVE(&strm->inqueue, ctl, next);
530 asoc->size_on_all_streams -= ctl->length;
531 sctp_ucount_decr(asoc->cnt_on_all_streams);
532 strm->last_sequence_delivered++;
533 sctp_add_to_readq(stcb->sctp_ep, stcb,
535 &stcb->sctp_socket->so_rcv, 1);
540 nxt_todel = strm->last_sequence_delivered + 1;
545 chk = TAILQ_FIRST(&asoc->reasmqueue);
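/*
 * Illustrative sketch, not compiled: the service loop above keeps pulling
 * chunks off the reassembly queue only while each one is strictly the next
 * TSN and, for ordered data, the next stream sequence number to deliver.
 * The struct and field names below are hypothetical; 0x04 is the DATA
 * chunk's unordered (U) bit.
 */
#if 0
struct example_frag {
	uint32_t tsn;
	uint16_t stream_seq;
	uint8_t rcv_flags;
};

static int
example_can_deliver_next(const struct example_frag *chk,
    uint32_t tsn_last_delivered, uint16_t next_ssn_to_deliver)
{
	if (chk->tsn != tsn_last_delivered + 1)
		return (0);	/* TSN gap: stop delivering */
	if ((chk->rcv_flags & 0x04) == 0 &&
	    chk->stream_seq != next_ssn_to_deliver)
		return (0);	/* ordered, but not the next SSN in its stream */
	return (1);
}
#endif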
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL??
 */
556 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
557 struct sctp_queued_to_read *control, int *abort_flag)
/*
 * FIX-ME maybe? What happens when the SSN wraps? If we are getting
 * all the data in one stream this could happen quite rapidly. One
 * could use the TSN to keep track of things, but this scheme breaks
 * down in the other type of stream usage that could occur. Send a
 * single msg to stream 0, send 4 billion messages to stream 1, now
 * send a message to stream 0. You have a situation where the TSN
 * has wrapped but not in the stream. Is this worth worrying about,
 * or should we just change our queue sort at the bottom to be by
 * TSN?
 *
 * Could it also be legal for a peer to send SSN 1 with TSN 2 and SSN 2
 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
 * assignment this could happen... and I don't see how this would be
 * a violation. So for now I am undecided and will leave the sort by
 * SSN alone. Maybe a hybrid approach is the answer.
 */
577 struct sctp_stream_in *strm;
578 struct sctp_queued_to_read *at;
584 asoc->size_on_all_streams += control->length;
585 sctp_ucount_incr(asoc->cnt_on_all_streams);
586 strm = &asoc->strmin[control->sinfo_stream];
587 nxt_todel = strm->last_sequence_delivered + 1;
588 #ifdef SCTP_STR_LOGGING
589 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
592 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
593 printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
594 (uint32_t) control->sinfo_stream,
595 (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
598 if (compare_with_wrap(strm->last_sequence_delivered,
599 control->sinfo_ssn, MAX_SEQ) ||
600 (strm->last_sequence_delivered == control->sinfo_ssn)) {
601 /* The incoming sseq is behind where we last delivered? */
603 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
604 printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
606 strm->last_sequence_delivered);
610 * throw it in the stream so it gets cleaned up in
611 * association destruction
613 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
614 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
615 0, M_DONTWAIT, 1, MT_DATA);
617 struct sctp_paramhdr *ph;
620 oper->m_len = sizeof(struct sctp_paramhdr) +
621 (sizeof(uint32_t) * 3);
622 ph = mtod(oper, struct sctp_paramhdr *);
623 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
624 ph->param_length = htons(oper->m_len);
625 ippp = (uint32_t *) (ph + 1);
626 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
628 *ippp = control->sinfo_tsn;
630 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
632 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
633 sctp_abort_an_association(stcb->sctp_ep, stcb,
634 SCTP_PEER_FAULTY, oper);
640 if (nxt_todel == control->sinfo_ssn) {
641 /* can be delivered right away? */
642 #ifdef SCTP_STR_LOGGING
643 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
646 asoc->size_on_all_streams -= control->length;
647 sctp_ucount_decr(asoc->cnt_on_all_streams);
648 strm->last_sequence_delivered++;
649 sctp_add_to_readq(stcb->sctp_ep, stcb,
651 &stcb->sctp_socket->so_rcv, 1);
652 control = TAILQ_FIRST(&strm->inqueue);
653 while (control != NULL) {
655 nxt_todel = strm->last_sequence_delivered + 1;
656 if (nxt_todel == control->sinfo_ssn) {
657 at = TAILQ_NEXT(control, next);
658 TAILQ_REMOVE(&strm->inqueue, control, next);
659 asoc->size_on_all_streams -= control->length;
660 sctp_ucount_decr(asoc->cnt_on_all_streams);
661 strm->last_sequence_delivered++;
663 * We ignore the return of deliver_data here
664 * since we always can hold the chunk on the
665 * d-queue. And we have a finite number that
666 * can be delivered from the strq.
668 #ifdef SCTP_STR_LOGGING
669 sctp_log_strm_del(control, NULL,
670 SCTP_STR_LOG_FROM_IMMED_DEL);
672 sctp_add_to_readq(stcb->sctp_ep, stcb,
674 &stcb->sctp_socket->so_rcv, 1);
683 * Ok, we did not deliver this guy, find the correct place
684 * to put it on the queue.
686 if (TAILQ_EMPTY(&strm->inqueue)) {
688 #ifdef SCTP_STR_LOGGING
689 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
691 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
693 TAILQ_FOREACH(at, &strm->inqueue, next) {
694 if (compare_with_wrap(at->sinfo_ssn,
695 control->sinfo_ssn, MAX_SEQ)) {
697 * one in queue is bigger than the
698 * new one, insert before this one
700 #ifdef SCTP_STR_LOGGING
701 sctp_log_strm_del(control, at,
702 SCTP_STR_LOG_FROM_INSERT_MD);
704 TAILQ_INSERT_BEFORE(at, control, next);
706 } else if (at->sinfo_ssn == control->sinfo_ssn) {
/*
 * Gak, he sent me a duplicate str
 * seq number
 */
/*
 * foo bar, I guess I will just free
 * this new guy; should we abort
 * too? FIX ME MAYBE? Or it COULD be
 * that the SSNs have wrapped.
 * Maybe I should compare to TSN
 * somehow... sigh, for now just blow
 * away the chunk!
 */
722 sctp_m_freem(control->data);
723 control->data = NULL;
724 asoc->size_on_all_streams -= control->length;
725 sctp_ucount_decr(asoc->cnt_on_all_streams);
726 sctp_free_remote_addr(control->whoFrom);
727 sctp_free_a_readq(stcb, control);
730 if (TAILQ_NEXT(at, next) == NULL) {
732 * We are at the end, insert
735 #ifdef SCTP_STR_LOGGING
736 sctp_log_strm_del(control, at,
737 SCTP_STR_LOG_FROM_INSERT_TL);
739 TAILQ_INSERT_AFTER(&strm->inqueue,
/*
 * Returns two things: you get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready, or a 0 back if the message is still incomplete.
 */
755 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
757 struct sctp_tmit_chunk *chk;
761 chk = TAILQ_FIRST(&asoc->reasmqueue);
763 /* nothing on the queue */
766 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
767 /* Not a first on the queue */
770 tsn = chk->rec.data.TSN_seq;
772 if (tsn != chk->rec.data.TSN_seq) {
775 *t_size += chk->send_size;
776 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
780 chk = TAILQ_NEXT(chk, sctp_next);
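/*
 * Illustrative sketch, not compiled: ordering decisions throughout this
 * file go through compare_with_wrap(), i.e. serial-number arithmetic so
 * TSN/SSN comparisons survive wraparound. A minimal 32-bit equivalent of
 * "a is newer than b" (the example_* name is hypothetical):
 */
#if 0
static int
example_tsn_newer(uint32_t a, uint32_t b)
{
	/* true when the forward distance from b to a is under half the space */
	return ((a != b) && ((uint32_t)(a - b) < 0x80000000U));
}
#endif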
786 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
788 struct sctp_tmit_chunk *chk;
792 chk = TAILQ_FIRST(&asoc->reasmqueue);
795 asoc->size_on_reasm_queue = 0;
796 asoc->cnt_on_reasm_queue = 0;
799 if (asoc->fragmented_delivery_inprogress == 0) {
801 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
802 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
803 (nxt_todel == chk->rec.data.stream_seq ||
804 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
/*
 * Yep, the first one is here and it's ok to deliver,
 * but should we?
 */
809 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
810 (tsize > stcb->sctp_ep->partial_delivery_point))) {
813 * Yes, we setup to start reception, by
814 * backing down the TSN just in case we
815 * can't deliver. If we
817 asoc->fragmented_delivery_inprogress = 1;
818 asoc->tsn_last_delivered =
819 chk->rec.data.TSN_seq - 1;
821 chk->rec.data.stream_number;
822 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
823 asoc->pdapi_ppid = chk->rec.data.payloadtype;
824 asoc->fragment_flags = chk->rec.data.rcv_flags;
825 sctp_service_reassembly(stcb, asoc);
829 sctp_service_reassembly(stcb, asoc);
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
840 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
841 struct sctp_tmit_chunk *chk, int *abort_flag)
844 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
846 struct sctp_tmit_chunk *at, *prev, *next;
849 cum_ackp1 = asoc->tsn_last_delivered + 1;
850 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
851 /* This is the first one on the queue */
852 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
854 * we do not check for delivery of anything when only one
857 asoc->size_on_reasm_queue = chk->send_size;
858 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
859 if (chk->rec.data.TSN_seq == cum_ackp1) {
860 if (asoc->fragmented_delivery_inprogress == 0 &&
861 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
862 SCTP_DATA_FIRST_FRAG) {
864 * An empty queue, no delivery inprogress,
865 * we hit the next one and it does NOT have
866 * a FIRST fragment mark.
869 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
870 printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
873 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
874 0, M_DONTWAIT, 1, MT_DATA);
877 struct sctp_paramhdr *ph;
881 sizeof(struct sctp_paramhdr) +
882 (sizeof(uint32_t) * 3);
883 ph = mtod(oper, struct sctp_paramhdr *);
885 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
886 ph->param_length = htons(oper->m_len);
887 ippp = (uint32_t *) (ph + 1);
888 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
890 *ippp = chk->rec.data.TSN_seq;
892 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
895 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
896 sctp_abort_an_association(stcb->sctp_ep, stcb,
897 SCTP_PEER_FAULTY, oper);
899 } else if (asoc->fragmented_delivery_inprogress &&
900 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
902 * We are doing a partial delivery and the
903 * NEXT chunk MUST be either the LAST or
904 * MIDDLE fragment NOT a FIRST
907 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
908 printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
911 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
912 0, M_DONTWAIT, 1, MT_DATA);
914 struct sctp_paramhdr *ph;
918 sizeof(struct sctp_paramhdr) +
919 (3 * sizeof(uint32_t));
920 ph = mtod(oper, struct sctp_paramhdr *);
922 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
923 ph->param_length = htons(oper->m_len);
924 ippp = (uint32_t *) (ph + 1);
925 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
927 *ippp = chk->rec.data.TSN_seq;
929 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
931 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
932 sctp_abort_an_association(stcb->sctp_ep, stcb,
933 SCTP_PEER_FAULTY, oper);
935 } else if (asoc->fragmented_delivery_inprogress) {
937 * Here we are ok with a MIDDLE or LAST
940 if (chk->rec.data.stream_number !=
941 asoc->str_of_pdapi) {
942 /* Got to be the right STR No */
944 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
945 printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
946 chk->rec.data.stream_number,
950 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
951 0, M_DONTWAIT, 1, MT_DATA);
953 struct sctp_paramhdr *ph;
957 sizeof(struct sctp_paramhdr) +
958 (sizeof(uint32_t) * 3);
960 struct sctp_paramhdr *);
962 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
965 ippp = (uint32_t *) (ph + 1);
966 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
968 *ippp = chk->rec.data.TSN_seq;
970 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
972 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
973 sctp_abort_an_association(stcb->sctp_ep,
974 stcb, SCTP_PEER_FAULTY, oper);
976 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
977 SCTP_DATA_UNORDERED &&
978 chk->rec.data.stream_seq !=
979 asoc->ssn_of_pdapi) {
980 /* Got to be the right STR Seq */
982 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
983 printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
984 chk->rec.data.stream_seq,
988 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
989 0, M_DONTWAIT, 1, MT_DATA);
991 struct sctp_paramhdr *ph;
995 sizeof(struct sctp_paramhdr) +
996 (3 * sizeof(uint32_t));
998 struct sctp_paramhdr *);
1000 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1003 ippp = (uint32_t *) (ph + 1);
1004 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1006 *ippp = chk->rec.data.TSN_seq;
1008 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1011 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1012 sctp_abort_an_association(stcb->sctp_ep,
1013 stcb, SCTP_PEER_FAULTY, oper);
1020 /* Find its place */
1021 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1022 if (compare_with_wrap(at->rec.data.TSN_seq,
1023 chk->rec.data.TSN_seq, MAX_TSN)) {
1025 * one in queue is bigger than the new one, insert
1029 asoc->size_on_reasm_queue += chk->send_size;
1030 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1032 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1034 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1035 /* Gak, He sent me a duplicate str seq number */
/*
 * foo bar, I guess I will just free this new guy;
 * should we abort too? FIX ME MAYBE? Or it COULD be
 * that the SSNs have wrapped. Maybe I should
 * compare to TSN somehow... sigh, for now just blow
 * away the chunk!
 */
1044 sctp_m_freem(chk->data);
1047 sctp_free_remote_addr(chk->whoTo);
1048 sctp_free_a_chunk(stcb, chk);
1051 last_flags = at->rec.data.rcv_flags;
1052 last_tsn = at->rec.data.TSN_seq;
1054 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1056 * We are at the end, insert it after this
1059 /* check it first */
1060 asoc->size_on_reasm_queue += chk->send_size;
1061 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1062 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1067 /* Now the audits */
1069 prev_tsn = chk->rec.data.TSN_seq - 1;
1070 if (prev_tsn == prev->rec.data.TSN_seq) {
/*
 * Ok, the one I am dropping onto the end is the
 * NEXT. A bit of validation here.
 */
1075 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1076 SCTP_DATA_FIRST_FRAG ||
1077 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1078 SCTP_DATA_MIDDLE_FRAG) {
1080 * Insert chk MUST be a MIDDLE or LAST
1083 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1084 SCTP_DATA_FIRST_FRAG) {
1086 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
printf("Prev check - It can be a middle or last but not a first\n");
printf("Gak, Evil plot, it's a FIRST!\n");
1091 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1092 0, M_DONTWAIT, 1, MT_DATA);
1094 struct sctp_paramhdr *ph;
1098 sizeof(struct sctp_paramhdr) +
1099 (3 * sizeof(uint32_t));
1101 struct sctp_paramhdr *);
1103 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1106 ippp = (uint32_t *) (ph + 1);
1107 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1109 *ippp = chk->rec.data.TSN_seq;
1111 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1114 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1115 sctp_abort_an_association(stcb->sctp_ep,
1116 stcb, SCTP_PEER_FAULTY, oper);
1120 if (chk->rec.data.stream_number !=
1121 prev->rec.data.stream_number) {
1123 * Huh, need the correct STR here,
1124 * they must be the same.
1127 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1128 printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1129 chk->rec.data.stream_number,
1130 prev->rec.data.stream_number);
1133 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1134 0, M_DONTWAIT, 1, MT_DATA);
1136 struct sctp_paramhdr *ph;
1140 sizeof(struct sctp_paramhdr) +
1141 (3 * sizeof(uint32_t));
1143 struct sctp_paramhdr *);
1145 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1148 ippp = (uint32_t *) (ph + 1);
1149 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1151 *ippp = chk->rec.data.TSN_seq;
1153 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1155 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1156 sctp_abort_an_association(stcb->sctp_ep,
1157 stcb, SCTP_PEER_FAULTY, oper);
1162 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1163 chk->rec.data.stream_seq !=
1164 prev->rec.data.stream_seq) {
1166 * Huh, need the correct STR here,
1167 * they must be the same.
1170 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1171 printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1172 chk->rec.data.stream_seq,
1173 prev->rec.data.stream_seq);
1176 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1177 0, M_DONTWAIT, 1, MT_DATA);
1179 struct sctp_paramhdr *ph;
1183 sizeof(struct sctp_paramhdr) +
1184 (3 * sizeof(uint32_t));
1186 struct sctp_paramhdr *);
1188 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1191 ippp = (uint32_t *) (ph + 1);
1192 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1194 *ippp = chk->rec.data.TSN_seq;
1196 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1198 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1199 sctp_abort_an_association(stcb->sctp_ep,
1200 stcb, SCTP_PEER_FAULTY, oper);
1205 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1206 SCTP_DATA_LAST_FRAG) {
1207 /* Insert chk MUST be a FIRST */
1208 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1209 SCTP_DATA_FIRST_FRAG) {
1211 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
printf("Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
1215 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1216 0, M_DONTWAIT, 1, MT_DATA);
1218 struct sctp_paramhdr *ph;
1222 sizeof(struct sctp_paramhdr) +
1223 (3 * sizeof(uint32_t));
1225 struct sctp_paramhdr *);
1227 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1230 ippp = (uint32_t *) (ph + 1);
1231 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1233 *ippp = chk->rec.data.TSN_seq;
1235 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1238 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1239 sctp_abort_an_association(stcb->sctp_ep,
1240 stcb, SCTP_PEER_FAULTY, oper);
1249 post_tsn = chk->rec.data.TSN_seq + 1;
1250 if (post_tsn == next->rec.data.TSN_seq) {
/*
 * Ok, the one I am inserting ahead of is my NEXT
 * one. A bit of validation here.
 */
1255 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1256 /* Insert chk MUST be a last fragment */
1257 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1258 != SCTP_DATA_LAST_FRAG) {
1260 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1261 printf("Next chk - Next is FIRST, we must be LAST\n");
printf("Gak, Evil plot, it's not a last!\n");
1265 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1266 0, M_DONTWAIT, 1, MT_DATA);
1268 struct sctp_paramhdr *ph;
1272 sizeof(struct sctp_paramhdr) +
1273 (3 * sizeof(uint32_t));
1275 struct sctp_paramhdr *);
1277 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1280 ippp = (uint32_t *) (ph + 1);
1281 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1283 *ippp = chk->rec.data.TSN_seq;
1285 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1287 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1288 sctp_abort_an_association(stcb->sctp_ep,
1289 stcb, SCTP_PEER_FAULTY, oper);
1294 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1295 SCTP_DATA_MIDDLE_FRAG ||
1296 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1297 SCTP_DATA_LAST_FRAG) {
1299 * Insert chk CAN be MIDDLE or FIRST NOT
1302 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1303 SCTP_DATA_LAST_FRAG) {
1305 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1306 printf("Next chk - Next is a MIDDLE/LAST\n");
1307 printf("Gak, Evil plot, new prev chunk is a LAST\n");
1310 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1311 0, M_DONTWAIT, 1, MT_DATA);
1313 struct sctp_paramhdr *ph;
1317 sizeof(struct sctp_paramhdr) +
1318 (3 * sizeof(uint32_t));
1320 struct sctp_paramhdr *);
1322 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1325 ippp = (uint32_t *) (ph + 1);
1326 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1328 *ippp = chk->rec.data.TSN_seq;
1330 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1333 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1334 sctp_abort_an_association(stcb->sctp_ep,
1335 stcb, SCTP_PEER_FAULTY, oper);
1340 if (chk->rec.data.stream_number !=
1341 next->rec.data.stream_number) {
1343 * Huh, need the correct STR here,
1344 * they must be the same.
1347 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1348 printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1349 chk->rec.data.stream_number,
1350 next->rec.data.stream_number);
1353 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1354 0, M_DONTWAIT, 1, MT_DATA);
1356 struct sctp_paramhdr *ph;
1360 sizeof(struct sctp_paramhdr) +
1361 (3 * sizeof(uint32_t));
1363 struct sctp_paramhdr *);
1365 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1368 ippp = (uint32_t *) (ph + 1);
1369 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1371 *ippp = chk->rec.data.TSN_seq;
1373 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1376 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1377 sctp_abort_an_association(stcb->sctp_ep,
1378 stcb, SCTP_PEER_FAULTY, oper);
1383 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1384 chk->rec.data.stream_seq !=
1385 next->rec.data.stream_seq) {
1387 * Huh, need the correct STR here,
1388 * they must be the same.
1391 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1392 printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1393 chk->rec.data.stream_seq,
1394 next->rec.data.stream_seq);
1397 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1398 0, M_DONTWAIT, 1, MT_DATA);
1400 struct sctp_paramhdr *ph;
1404 sizeof(struct sctp_paramhdr) +
1405 (3 * sizeof(uint32_t));
1407 struct sctp_paramhdr *);
1409 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1412 ippp = (uint32_t *) (ph + 1);
1413 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1415 *ippp = chk->rec.data.TSN_seq;
1417 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1419 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1420 sctp_abort_an_association(stcb->sctp_ep,
1421 stcb, SCTP_PEER_FAULTY, oper);
1430 /* Do we need to do some delivery? check */
1431 sctp_deliver_reasm_check(stcb, asoc);
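/*
 * Illustrative sketch, not compiled: the long fragment audit above reduces
 * to an adjacency rule between consecutive TSNs on the reassembly queue,
 * expressed here with the DATA chunk's B/E bits (0x02 = first fragment,
 * 0x01 = last fragment). The example_* names are hypothetical.
 */
#if 0
#define	EX_FIRST_FRAG	0x02
#define	EX_LAST_FRAG	0x01

static int
example_valid_successor(uint8_t prev_flags, uint8_t new_flags)
{
	if (prev_flags & EX_LAST_FRAG)
		/* prev ended a message: the next TSN must start a new one */
		return ((new_flags & EX_FIRST_FRAG) != 0);
	/* prev was a FIRST or MIDDLE piece: the next TSN continues it */
	return ((new_flags & EX_FIRST_FRAG) == 0);
}
#endif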
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this,
 * but this is doubtful. It is too bad I must worry about evil crackers, sigh.
 */
1441 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1444 struct sctp_tmit_chunk *at;
1447 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1448 if (compare_with_wrap(TSN_seq,
1449 at->rec.data.TSN_seq, MAX_TSN)) {
1450 /* is it one bigger? */
1451 tsn_est = at->rec.data.TSN_seq + 1;
1452 if (tsn_est == TSN_seq) {
1453 /* yep. It better be a last then */
1454 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1455 SCTP_DATA_LAST_FRAG) {
/*
 * Ok, this guy belongs next to a guy
 * that is NOT last; it should be a
 * middle/last, not a complete one.
 */
/*
 * This guy is ok since it's a LAST
 * and the new chunk is a fully
 * self-contained one.
 */
1472 } else if (TSN_seq == at->rec.data.TSN_seq) {
1473 /* Software error since I have a dup? */
/*
 * Ok, 'at' is larger than the new chunk, but does it
 * need to be right before it?
 */
1480 tsn_est = TSN_seq + 1;
1481 if (tsn_est == at->rec.data.TSN_seq) {
1482 /* Yep, It better be a first */
1483 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1484 SCTP_DATA_FIRST_FRAG) {
1496 extern unsigned int sctp_max_chunks_on_queue;
1498 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1499 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1500 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1501 int *break_flag, int last_chunk)
1503 /* Process a data chunk */
1504 /* struct sctp_tmit_chunk *chk; */
1505 struct sctp_tmit_chunk *chk;
1509 uint16_t strmno, strmseq;
1511 struct sctp_queued_to_read *control;
1514 tsn = ntohl(ch->dp.tsn);
1515 #ifdef SCTP_MAP_LOGGING
1516 sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
1518 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1519 asoc->cumulative_tsn == tsn) {
1520 /* It is a duplicate */
1521 SCTP_STAT_INCR(sctps_recvdupdata);
1522 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1523 /* Record a dup for the next outbound sack */
1524 asoc->dup_tsns[asoc->numduptsns] = tsn;
1529 /* Calculate the number of TSN's between the base and this TSN */
1530 if (tsn >= asoc->mapping_array_base_tsn) {
1531 gap = tsn - asoc->mapping_array_base_tsn;
1533 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1535 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1536 /* Can't hold the bit in the mapping at max array, toss it */
1539 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1540 if (sctp_expand_mapping_array(asoc)) {
1541 /* Can't expand, drop it */
1545 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1548 /* See if we have received this one already */
1549 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1550 SCTP_STAT_INCR(sctps_recvdupdata);
1551 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1552 /* Record a dup for the next outbound sack */
1553 asoc->dup_tsns[asoc->numduptsns] = tsn;
1556 if (!callout_pending(&asoc->dack_timer.timer)) {
1558 * By starting the timer we assure that we WILL sack
1559 * at the end of the packet when sctp_sack_check
1562 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
1568 * Check to see about the GONE flag, duplicates would cause a sack
1569 * to be sent up above
1571 if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1572 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1573 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1576 * wait a minute, this guy is gone, there is no longer a
1577 * receiver. Send peer an ABORT!
1579 struct mbuf *op_err;
1581 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1582 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
1587 * Now before going further we see if there is room. If NOT then we
1588 * MAY let one through only IF this TSN is the one we are waiting
1589 * for on a partial delivery API.
1592 /* now do the tests */
1593 if (((asoc->cnt_on_all_streams +
1594 asoc->cnt_on_reasm_queue +
1595 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
1596 (((int)asoc->my_rwnd) <= 0)) {
1598 * When we have NO room in the rwnd we check to make sure
1599 * the reader is doing its job...
1601 if (stcb->sctp_socket->so_rcv.sb_cc) {
1602 /* some to read, wake-up */
1603 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1605 /* now is it in the mapping array of what we have accepted? */
1606 if (compare_with_wrap(tsn,
1607 asoc->highest_tsn_inside_map, MAX_TSN)) {
1609 /* Nope not in the valid range dump it */
1611 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1612 printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
1613 (u_long)tsn, (u_long)asoc->my_rwnd,
1614 sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
1618 sctp_set_rwnd(stcb, asoc);
1619 if ((asoc->cnt_on_all_streams +
1620 asoc->cnt_on_reasm_queue +
1621 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
1622 SCTP_STAT_INCR(sctps_datadropchklmt);
1624 SCTP_STAT_INCR(sctps_datadroprwnd);
1631 strmno = ntohs(ch->dp.stream_id);
1632 if (strmno >= asoc->streamincnt) {
1633 struct sctp_paramhdr *phdr;
1636 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1637 1, M_DONTWAIT, 1, MT_DATA);
1639 /* add some space up front so prepend will work well */
1640 mb->m_data += sizeof(struct sctp_chunkhdr);
1641 phdr = mtod(mb, struct sctp_paramhdr *);
1643 * Error causes are just param's and this one has
1644 * two back to back phdr, one with the error type
1645 * and size, the other with the streamid and a rsvd
1647 mb->m_pkthdr.len = mb->m_len =
1648 (sizeof(struct sctp_paramhdr) * 2);
1649 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1650 phdr->param_length =
1651 htons(sizeof(struct sctp_paramhdr) * 2);
1653 /* We insert the stream in the type field */
1654 phdr->param_type = ch->dp.stream_id;
1655 /* And set the length to 0 for the rsvd field */
1656 phdr->param_length = 0;
1657 sctp_queue_op_err(stcb, mb);
1659 SCTP_STAT_INCR(sctps_badsid);
/*
 * Before we continue, let's validate that we are not being fooled by
 * an evil attacker. We can only have 4k chunks based on our TSN
 * spread allowed by the mapping array (512 * 8 bits), so there is no
 * way our stream sequence numbers could have wrapped. We of course
 * only validate the FIRST fragment, so the bit must be set.
 */
1669 strmseq = ntohs(ch->dp.stream_sequence);
1670 if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1671 (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1672 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1673 strmseq, MAX_SEQ) ||
1674 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1675 /* The incoming sseq is behind where we last delivered? */
1677 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1678 printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1680 asoc->strmin[strmno].last_sequence_delivered);
1684 * throw it in the stream so it gets cleaned up in
1685 * association destruction
1687 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1688 0, M_DONTWAIT, 1, MT_DATA);
1690 struct sctp_paramhdr *ph;
1693 oper->m_len = sizeof(struct sctp_paramhdr) +
1694 (3 * sizeof(uint32_t));
1695 ph = mtod(oper, struct sctp_paramhdr *);
1696 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1697 ph->param_length = htons(oper->m_len);
1698 ippp = (uint32_t *) (ph + 1);
1699 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1703 *ippp = ((strmno << 16) | strmseq);
1706 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1707 sctp_abort_an_association(stcb->sctp_ep, stcb,
1708 SCTP_PEER_FAULTY, oper);
1712 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1713 if (last_chunk == 0) {
1714 dmbuf = sctp_m_copym(*m,
1715 (offset + sizeof(struct sctp_data_chunk)),
1716 the_len, M_DONTWAIT);
1717 #ifdef SCTP_MBUF_LOGGING
1723 if (mat->m_flags & M_EXT) {
1724 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1731 /* We can steal the last chunk */
1733 /* lop off the top part */
1734 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1735 if (dmbuf->m_pkthdr.len > the_len) {
1736 /* Trim the end round bytes off too */
1737 m_adj(dmbuf, -(dmbuf->m_pkthdr.len - the_len));
1740 if (dmbuf == NULL) {
1741 SCTP_STAT_INCR(sctps_nomem);
1744 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1745 asoc->fragmented_delivery_inprogress == 0 &&
1746 TAILQ_EMPTY(&asoc->resetHead) &&
1747 ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
1748 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1749 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1750 /* Candidate for express delivery */
/*
 * It's not fragmented, no PD-API is up, nothing in the
 * delivery queue, it's un-ordered OR ordered and the next to
 * deliver AND nothing else is stuck on the stream queue,
 * and there is room for it in the socket buffer. Let's just
 * stuff it up the buffer....
 */
1759 /* It would be nice to avoid this copy if we could :< */
1760 sctp_alloc_a_readq(stcb, control);
1761 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1767 if (control == NULL) {
1768 goto failed_express_del;
1770 sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1);
1771 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1772 /* for ordered, bump what we delivered */
1773 asoc->strmin[strmno].last_sequence_delivered++;
1775 SCTP_STAT_INCR(sctps_recvexpress);
1776 #ifdef SCTP_STR_LOGGING
1777 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1778 SCTP_STR_LOG_FROM_EXPRS_DEL);
1781 goto finish_express_del;
1784 /* If we reach here this is a new chunk */
1787 /* Express for fragmented delivery? */
1788 if ((asoc->fragmented_delivery_inprogress) &&
1789 (stcb->asoc.control_pdapi) &&
1790 (asoc->str_of_pdapi == strmno) &&
1791 (asoc->ssn_of_pdapi == strmseq)
1793 control = stcb->asoc.control_pdapi;
1794 if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1795 /* Can't be another first? */
1796 goto failed_pdapi_express_del;
1798 if (tsn == (control->sinfo_tsn + 1)) {
1799 /* Yep, we can add it on */
1803 if (ch->ch.chunk_flags & SCTP_DATA_LAST_FRAG) {
1806 cumack = asoc->cumulative_tsn;
1807 if ((cumack + 1) == tsn)
1810 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1812 &stcb->sctp_socket->so_rcv)) {
1813 printf("Append fails end:%d\n", end);
1814 goto failed_pdapi_express_del;
1816 SCTP_STAT_INCR(sctps_recvexpressm);
1817 control->sinfo_tsn = tsn;
1818 asoc->tsn_last_delivered = tsn;
1819 asoc->fragment_flags = ch->ch.chunk_flags;
1820 asoc->tsn_of_pdapi_last_delivered = tsn;
1821 asoc->last_flags_delivered = ch->ch.chunk_flags;
1822 asoc->last_strm_seq_delivered = strmseq;
1823 asoc->last_strm_no_delivered = strmno;
1826 /* clean up the flags and such */
1827 asoc->fragmented_delivery_inprogress = 0;
1828 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1829 asoc->strmin[strmno].last_sequence_delivered++;
1831 stcb->asoc.control_pdapi = NULL;
1834 goto finish_express_del;
1837 failed_pdapi_express_del:
1839 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1840 sctp_alloc_a_chunk(stcb, chk);
1842 /* No memory so we drop the chunk */
1843 SCTP_STAT_INCR(sctps_nomem);
1844 if (last_chunk == 0) {
1845 /* we copied it, free the copy */
1846 sctp_m_freem(dmbuf);
1850 chk->rec.data.TSN_seq = tsn;
1851 chk->no_fr_allowed = 0;
1852 chk->rec.data.stream_seq = strmseq;
1853 chk->rec.data.stream_number = strmno;
1854 chk->rec.data.payloadtype = ch->dp.protocol_id;
1855 chk->rec.data.context = stcb->asoc.context;
1856 chk->rec.data.doing_fast_retransmit = 0;
1857 chk->rec.data.rcv_flags = ch->ch.chunk_flags;
1859 chk->send_size = the_len;
1861 atomic_add_int(&net->ref_count, 1);
1864 sctp_alloc_a_readq(stcb, control);
1865 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1871 if (control == NULL) {
1872 /* No memory so we drop the chunk */
1873 SCTP_STAT_INCR(sctps_nomem);
1874 if (last_chunk == 0) {
1875 /* we copied it, free the copy */
1876 sctp_m_freem(dmbuf);
1880 control->length = the_len;
1883 /* Mark it as received */
1884 /* Now queue it where it belongs */
1885 if (control != NULL) {
1886 /* First a sanity check */
1887 if (asoc->fragmented_delivery_inprogress) {
/*
 * Ok, we have a fragmented delivery in progress; if
 * this chunk is next to deliver OR belongs in our
 * view to the reassembly, the peer is evil or
 * broken.
 */
1894 uint32_t estimate_tsn;
1896 estimate_tsn = asoc->tsn_last_delivered + 1;
1897 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1898 (estimate_tsn == control->sinfo_tsn)) {
1899 /* Evil/Broke peer */
1900 sctp_m_freem(control->data);
1901 control->data = NULL;
1902 sctp_free_remote_addr(control->whoFrom);
1903 sctp_free_a_readq(stcb, control);
1904 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1905 0, M_DONTWAIT, 1, MT_DATA);
1907 struct sctp_paramhdr *ph;
1911 sizeof(struct sctp_paramhdr) +
1912 (3 * sizeof(uint32_t));
1913 ph = mtod(oper, struct sctp_paramhdr *);
1915 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1916 ph->param_length = htons(oper->m_len);
1917 ippp = (uint32_t *) (ph + 1);
1918 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1922 *ippp = ((strmno << 16) | strmseq);
1924 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1925 sctp_abort_an_association(stcb->sctp_ep, stcb,
1926 SCTP_PEER_FAULTY, oper);
1931 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1932 sctp_m_freem(control->data);
1933 control->data = NULL;
1934 sctp_free_remote_addr(control->whoFrom);
1935 sctp_free_a_readq(stcb, control);
1937 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1938 0, M_DONTWAIT, 1, MT_DATA);
1940 struct sctp_paramhdr *ph;
1944 sizeof(struct sctp_paramhdr) +
1945 (3 * sizeof(uint32_t));
1947 struct sctp_paramhdr *);
1949 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1952 ippp = (uint32_t *) (ph + 1);
1953 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1957 *ippp = ((strmno << 16) | strmseq);
1959 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1960 sctp_abort_an_association(stcb->sctp_ep,
1961 stcb, SCTP_PEER_FAULTY, oper);
1968 /* No PDAPI running */
1969 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
/*
 * Reassembly queue is NOT empty; validate
 * that this TSN does not need to be in the
 * reassembly queue. If it does then our peer
 * is broken or evil.
 */
1976 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1977 sctp_m_freem(control->data);
1978 control->data = NULL;
1979 sctp_free_remote_addr(control->whoFrom);
1980 sctp_free_a_readq(stcb, control);
1981 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1982 0, M_DONTWAIT, 1, MT_DATA);
1984 struct sctp_paramhdr *ph;
1988 sizeof(struct sctp_paramhdr) +
1989 (3 * sizeof(uint32_t));
1991 struct sctp_paramhdr *);
1993 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1996 ippp = (uint32_t *) (ph + 1);
1997 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2001 *ippp = ((strmno << 16) | strmseq);
2003 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2004 sctp_abort_an_association(stcb->sctp_ep,
2005 stcb, SCTP_PEER_FAULTY, oper);
2012 /* ok, if we reach here we have passed the sanity checks */
2013 if (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) {
2014 /* queue directly into socket buffer */
2015 sctp_add_to_readq(stcb->sctp_ep, stcb,
2017 &stcb->sctp_socket->so_rcv, 1);
/*
 * Special check for when streams are resetting. We
 * could be smarter about this and check the
 * actual stream to see if it is not being reset...
 * that way we would not create a HOLB when amongst
 * streams being reset and those not being reset.
 *
 * We take complete messages that have a stream reset
 * intervening (aka the TSN is after where our
 * cum-ack needs to be) off and put them on a
 * pending_reply_queue. The reassembly ones we do
 * not have to worry about since they are all sorted
 * and processed by TSN order. It is only the
 * singletons I must worry about.
 */
2034 struct sctp_stream_reset_list *liste;
2036 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2037 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)) ||
2038 (tsn == ntohl(liste->tsn)))
/*
 * yep, it's past where we need to reset... go
 * ahead and queue it.
 */
2044 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2046 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2048 struct sctp_queued_to_read *ctlOn;
2049 unsigned char inserted = 0;
2051 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2053 if (compare_with_wrap(control->sinfo_tsn,
2054 ctlOn->sinfo_tsn, MAX_TSN)) {
2055 ctlOn = TAILQ_NEXT(ctlOn, next);
2058 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2063 if (inserted == 0) {
/*
 * must be put at end; use
 * prevP (all set up from
 * the loop) to set up nextP.
 */
2069 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2073 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2080 /* Into the re-assembly queue */
2081 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2084 * the assoc is now gone and chk was put onto the
2085 * reasm queue, which has all been freed.
2092 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2093 /* we have a new high score */
2094 asoc->highest_tsn_inside_map = tsn;
2095 #ifdef SCTP_MAP_LOGGING
2096 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2099 if (tsn == (asoc->cumulative_tsn + 1)) {
2100 /* Update cum-ack */
2101 asoc->cumulative_tsn = tsn;
2106 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2107 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2109 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2111 SCTP_STAT_INCR(sctps_recvdata);
2112 /* Set it present please */
2113 #ifdef SCTP_STR_LOGGING
2114 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2116 #ifdef SCTP_MAP_LOGGING
2117 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2118 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2120 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
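/*
 * Illustrative sketch, not compiled: the bit for a TSN in the mapping array
 * is located by its distance ("gap") from mapping_array_base_tsn, with the
 * 32-bit wrap handled exactly as in the code above. The example_* name is
 * hypothetical.
 */
#if 0
static uint32_t
example_tsn_gap(uint32_t tsn, uint32_t base_tsn)
{
	if (tsn >= base_tsn)
		return (tsn - base_tsn);
	/* base sits near the top of the TSN space and tsn has wrapped past 0 */
	return ((0xffffffffU - base_tsn) + tsn + 1);
}
#endif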
int8_t sctp_map_lookup_tab[256] = {
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 6,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 7,
};
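/*
 * Illustrative sketch, not compiled: each entry of sctp_map_lookup_tab is
 * the index of the last bit in the run of consecutive 1-bits starting at
 * bit 0 of the byte, or -1 when bit 0 is clear. sctp_sack_check() uses it
 * to advance the cumulative TSN one mapping-array byte at a time. The same
 * value can be computed directly (the example_* name is hypothetical):
 */
#if 0
static int8_t
example_map_lookup(uint8_t byte)
{
	int8_t i;

	for (i = 0; i < 8; i++) {
		if ((byte & (1 << i)) == 0)
			return (i - 1);	/* run of ones ends before bit i */
	}
	return (7);	/* all eight bits set */
}
#endif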
2161 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2164 * Now we also need to check the mapping array in a couple of ways.
2165 * 1) Did we move the cum-ack point?
2167 struct sctp_association *asoc;
2170 int slide_from, slide_end, lgap, distance;
2172 #ifdef SCTP_MAP_LOGGING
2173 uint32_t old_cumack, old_base, old_highest;
2174 unsigned char aux_array[64];
2177 struct sctp_stream_reset_list *liste;
2182 #ifdef SCTP_MAP_LOGGING
2183 old_cumack = asoc->cumulative_tsn;
2184 old_base = asoc->mapping_array_base_tsn;
2185 old_highest = asoc->highest_tsn_inside_map;
2186 if (asoc->mapping_array_size < 64)
2187 memcpy(aux_array, asoc->mapping_array,
2188 asoc->mapping_array_size);
2190 memcpy(aux_array, asoc->mapping_array, 64);
2194 * We could probably improve this a small bit by calculating the
2195 * offset of the current cum-ack as the starting point.
2199 for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
2200 if (asoc->mapping_array[i] == 0xff) {
2203 /* there is a 0 bit */
2205 at += sctp_map_lookup_tab[asoc->mapping_array[i]];
2209 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + at;
2210 /* at is one off, since in the table an embedded -1 is present */
2213 if (compare_with_wrap(asoc->cumulative_tsn,
2214 asoc->highest_tsn_inside_map,
2217 panic("huh, cumack greater than high-tsn in map");
2219 printf("huh, cumack greater than high-tsn in map - should panic?\n");
2220 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2224 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2225 /* The complete array was covered by a single FR */
2226 /* highest becomes the cum-ack */
2229 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2230 /* clear the array */
2232 clr = asoc->mapping_array_size;
2234 clr = (at >> 3) + 1;
2236 * this should be the all-ones case, but just in case
2239 if (clr > asoc->mapping_array_size)
2240 clr = asoc->mapping_array_size;
2242 memset(asoc->mapping_array, 0, clr);
2243 /* base becomes one ahead of the cum-ack */
2244 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2245 #ifdef SCTP_MAP_LOGGING
2246 sctp_log_map(old_base, old_cumack, old_highest,
2247 SCTP_MAP_PREPARE_SLIDE);
2248 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2249 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2251 } else if (at >= 8) {
2252 /* we can slide the mapping array down */
2253 /* Calculate the new byte position we can move down */
2254 slide_from = at >> 3;
2256 * now calculate the ceiling of the move using our highest
2259 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2260 lgap = asoc->highest_tsn_inside_map -
2261 asoc->mapping_array_base_tsn;
2263 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2264 asoc->highest_tsn_inside_map + 1;
2266 slide_end = lgap >> 3;
2267 if (slide_end < slide_from) {
2268 panic("impossible slide");
2270 distance = (slide_end - slide_from) + 1;
2271 #ifdef SCTP_MAP_LOGGING
2272 sctp_log_map(old_base, old_cumack, old_highest,
2273 SCTP_MAP_PREPARE_SLIDE);
2274 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2275 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2277 if (distance + slide_from > asoc->mapping_array_size ||
2280 * Here we do NOT slide forward the array so that
2281 * hopefully when more data comes in to fill it up
2282 * we will be able to slide it forward. Really I
2283 * don't think this should happen :-0
2286 #ifdef SCTP_MAP_LOGGING
2287 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2288 (uint32_t) asoc->mapping_array_size,
2289 SCTP_MAP_SLIDE_NONE);
2294 for (ii = 0; ii < distance; ii++) {
2295 asoc->mapping_array[ii] =
2296 asoc->mapping_array[slide_from + ii];
2298 for (ii = distance; ii <= slide_end; ii++) {
2299 asoc->mapping_array[ii] = 0;
2301 asoc->mapping_array_base_tsn += (slide_from << 3);
2302 #ifdef SCTP_MAP_LOGGING
2303 sctp_log_map(asoc->mapping_array_base_tsn,
2304 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2305 SCTP_MAP_SLIDE_RESULT);
2309 /* check the special flag for stream resets */
2310 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2311 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2312 (asoc->cumulative_tsn == liste->tsn))
2315 * we have finished working through the backlogged TSNs; now it is
2316 * time to reset streams. 1: call reset function. 2: free
2317 * pending_reply space. 3: distribute any chunks in
2318 * pending_reply_queue.
2320 struct sctp_queued_to_read *ctl;
2322 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2323 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2325 liste = TAILQ_FIRST(&asoc->resetHead);
2326 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2327 if (ctl && (liste == NULL)) {
2328 /* All can be removed */
2330 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2331 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2335 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2338 /* more than one in queue */
2339 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2341 * if ctl->sinfo_tsn is <= liste->tsn we can
2342 * process it which is the NOT of
2343 * ctl->sinfo_tsn > liste->tsn
2345 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2346 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2350 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2354 * Now service re-assembly to pick up anything that has been
2355 * held on the reassembly queue.
2357 sctp_deliver_reasm_check(stcb, asoc);
2360 * Now we need to see if we need to queue a sack or just start the
2361 * timer (if allowed).
2364 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2366 * Ok, special case: in the SHUTDOWN-SENT case, here we
2367 * make sure the SACK timer is off and instead send a
2368 * SHUTDOWN and a SACK
2370 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2371 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2372 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2374 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2375 sctp_send_sack(stcb);
2379 /* is there a gap now ? */
2380 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2381 stcb->asoc.cumulative_tsn, MAX_TSN);
2384 * CMT DAC algorithm: increase number of packets
2385 * received since last ack
2387 stcb->asoc.cmt_dac_pkts_rcvd++;
2389 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a
2391 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2393 (stcb->asoc.numduptsns) || /* we have dups */
2394 (is_a_gap) || /* is still a gap */
2395 (stcb->asoc.delayed_ack == 0) ||
2396 (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up. second
2400 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) &&
2401 (stcb->asoc.first_ack_sent == 1) &&
2402 (stcb->asoc.numduptsns == 0) &&
2403 (stcb->asoc.delayed_ack) &&
2404 (!callout_pending(&stcb->asoc.dack_timer.timer))) {
2407 * CMT DAC algorithm: With CMT,
2408 * delay acks even in the face of
2410 * reordering. Therefore, acks that
2411 * do not have to be sent because of
2412 * the above reasons will be
2413 * delayed. That is, acks that would
2414 * have been sent due to gap reports
2415 * will be delayed with DAC. Start
2416 * the delayed ack timer.
2418 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2419 stcb->sctp_ep, stcb, NULL);
2422 * Ok we must build a SACK since the
2423 * timer is pending, we got our
2424 * first packet OR there are gaps or
2427 stcb->asoc.first_ack_sent = 1;
2429 sctp_send_sack(stcb);
2430 /* The sending will stop the timer */
2433 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2434 stcb->sctp_ep, stcb, NULL);
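/*
 * The gap test above (is_a_gap) and most TSN ordering decisions rely on
 * compare_with_wrap(), a greater-than test that stays correct across
 * 32-bit TSN wrap.  Below is a minimal userland model of such a serial
 * number comparison; it reflects an assumption about the semantics and is
 * not a copy of the kernel macro.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* nonzero if a is "after" b on the circular 32-bit TSN space */
static int
tsn_after(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) > 0);
}

int
main(void)
{
	/* 5 is after 2; 3 is after 0xfffffffe even though 3 < 0xfffffffe */
	printf("%d %d\n", tsn_after(5, 2), tsn_after(3, 0xfffffffeU));
	return (0);
}
#endif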
2441 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2443 struct sctp_tmit_chunk *chk;
2447 if (asoc->fragmented_delivery_inprogress) {
2448 sctp_service_reassembly(stcb, asoc);
2450 /* Can we proceed further, i.e. the PD-API is complete */
2451 if (asoc->fragmented_delivery_inprogress) {
2456 * Now is there some other chunk I can deliver from the reassembly
2459 chk = TAILQ_FIRST(&asoc->reasmqueue);
2461 asoc->size_on_reasm_queue = 0;
2462 asoc->cnt_on_reasm_queue = 0;
2465 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2466 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2467 ((nxt_todel == chk->rec.data.stream_seq) ||
2468 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2470 * Yep, the first one is here. We set up to start reception,
2471 * by backing down the TSN just in case we can't deliver.
2475 * Before we start, though, either all of the message should
2476 * be here, or 1/4 the socket buffer max, or nothing on the
2477 * delivery queue and something can be delivered.
2479 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2480 (tsize > stcb->sctp_ep->partial_delivery_point))) {
2481 asoc->fragmented_delivery_inprogress = 1;
2482 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2483 asoc->str_of_pdapi = chk->rec.data.stream_number;
2484 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2485 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2486 asoc->fragment_flags = chk->rec.data.rcv_flags;
2487 sctp_service_reassembly(stcb, asoc);
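/*
 * A small sketch of the PD-API start test used above: partial delivery is
 * begun either when the whole message is already on the reassembly queue
 * (sctp_is_all_msg_on_reasm) or when the queued portion exceeds the
 * endpoint's partial_delivery_point.  The names below are local to the
 * sketch, not kernel identifiers.
 */
#if 0
#include <stdio.h>

static int
may_start_partial_delivery(int all_of_msg_here, unsigned int queued_bytes,
    unsigned int partial_delivery_point)
{
	return (all_of_msg_here || (queued_bytes > partial_delivery_point));
}

int
main(void)
{
	/* 12000 queued bytes against an 8192 byte threshold -> start PD-API */
	printf("%d\n", may_start_partial_delivery(0, 12000, 8192));
	return (0);
}
#endif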
2492 extern int sctp_strict_data_order;
2495 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2496 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2497 struct sctp_nets *net, uint32_t * high_tsn)
2499 struct sctp_data_chunk *ch, chunk_buf;
2500 struct sctp_association *asoc;
2501 int num_chunks = 0; /* number of control chunks processed */
2503 int chk_length, break_flag, last_chunk;
2504 int abort_flag = 0, was_a_gap = 0;
2508 sctp_set_rwnd(stcb, &stcb->asoc);
2511 SCTP_TCB_LOCK_ASSERT(stcb);
2513 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
2514 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
2515 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
2517 * wait a minute, this guy is gone, there is no longer a
2518 * receiver. Send peer an ABORT!
2520 struct mbuf *op_err;
2522 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2523 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
2526 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2527 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2528 /* there was a gap before this data was processed */
2532 * set up where we got the last DATA packet from for any SACK that
2533 * may need to go out. Don't bump the net. This is done ONLY when a
2534 * chunk is assigned.
2536 asoc->last_data_chunk_from = net;
2539 * Now before we proceed we must figure out if this is a wasted
2540 * cluster... i.e. it is a small packet sent in and yet the driver
2541 * underneath allocated a full cluster for it. If so we must copy it
2542 * to a smaller mbuf and free up the cluster mbuf. This will help
2543 * with cluster starvation.
2545 if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2546 /* we only handle mbufs that are singletons.. not chains */
2547 m = sctp_get_mbuf_for_msg(m->m_len, 1, M_DONTWAIT, 1, MT_DATA);
2549 /* ok, let's see if we can copy the data up */
2552 if ((*mm)->m_flags & M_PKTHDR) {
2553 /* got to copy the header first */
2554 M_MOVE_PKTHDR(m, (*mm));
2556 /* get the pointers and copy */
2557 to = mtod(m, caddr_t *);
2558 from = mtod((*mm), caddr_t *);
2559 memcpy(to, from, (*mm)->m_len);
2560 /* copy the length and free up the old */
2561 m->m_len = (*mm)->m_len;
2563 /* success, back copy */
2566 /* We are in trouble in the mbuf world .. yikes */
2570 /* get pointer to the first chunk header */
2571 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2572 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2577 * process all DATA chunks...
2579 *high_tsn = asoc->cumulative_tsn;
2581 while (stop_proc == 0) {
2582 /* validate chunk length */
2583 chk_length = ntohs(ch->ch.chunk_length);
2584 if (length - *offset < chk_length) {
2585 /* all done, mutilated chunk */
2589 if (ch->ch.chunk_type == SCTP_DATA) {
2590 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2592 * Need to send an abort since we had an
2593 * invalid data chunk.
2595 struct mbuf *op_err;
2597 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
2598 0, M_DONTWAIT, 1, MT_DATA);
2601 struct sctp_paramhdr *ph;
2604 op_err->m_len = sizeof(struct sctp_paramhdr) +
2605 (2 * sizeof(uint32_t));
2606 ph = mtod(op_err, struct sctp_paramhdr *);
2608 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2609 ph->param_length = htons(op_err->m_len);
2610 ippp = (uint32_t *) (ph + 1);
2611 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2613 *ippp = asoc->cumulative_tsn;
2616 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2617 sctp_abort_association(inp, stcb, m, iphlen, sh,
2621 #ifdef SCTP_AUDITING_ENABLED
2622 sctp_audit_log(0xB1, 0);
2624 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2629 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2630 chk_length, net, high_tsn, &abort_flag, &break_flag,
2639 * Set because of out of rwnd space and no
2640 * drop rep space left.
2646 /* not a data chunk in the data region */
2647 switch (ch->ch.chunk_type) {
2648 case SCTP_INITIATION:
2649 case SCTP_INITIATION_ACK:
2650 case SCTP_SELECTIVE_ACK:
2651 case SCTP_HEARTBEAT_REQUEST:
2652 case SCTP_HEARTBEAT_ACK:
2653 case SCTP_ABORT_ASSOCIATION:
2655 case SCTP_SHUTDOWN_ACK:
2656 case SCTP_OPERATION_ERROR:
2657 case SCTP_COOKIE_ECHO:
2658 case SCTP_COOKIE_ACK:
2661 case SCTP_SHUTDOWN_COMPLETE:
2662 case SCTP_AUTHENTICATION:
2663 case SCTP_ASCONF_ACK:
2664 case SCTP_PACKET_DROPPED:
2665 case SCTP_STREAM_RESET:
2666 case SCTP_FORWARD_CUM_TSN:
2669 * Now, what do we do with KNOWN chunks that
2670 * are NOT in the right place?
2672 * For now, I do nothing but ignore them. We
2673 * may later want to add sysctl stuff to
2674 * switch out and do either an ABORT() or
2675 * possibly process them.
2677 if (sctp_strict_data_order) {
2678 struct mbuf *op_err;
2680 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2681 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
2686 /* unknown chunk type, use bit rules */
2687 if (ch->ch.chunk_type & 0x40) {
2688 /* Add an error report to the queue */
2690 struct sctp_paramhdr *phd;
2692 mm = sctp_get_mbuf_for_msg(sizeof(*phd), 1, M_DONTWAIT, 1, MT_DATA);
2694 phd = mtod(mm, struct sctp_paramhdr *);
2696 * We cheat and use param
2697 * type since we did not
2698 * bother to define an error
2699 * cause struct. They are
2700 * the same basic format
2701 * with different names.
2704 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2706 htons(chk_length + sizeof(*phd));
2707 mm->m_len = sizeof(*phd);
2708 mm->m_next = sctp_m_copym(m, *offset,
2709 SCTP_SIZE32(chk_length),
2713 SCTP_SIZE32(chk_length) +
2715 sctp_queue_op_err(stcb, mm);
2721 if ((ch->ch.chunk_type & 0x80) == 0) {
2722 /* discard the rest of this packet */
2724 } /* else skip this bad chunk and
2727 }; /* switch of chunk type */
2729 *offset += SCTP_SIZE32(chk_length);
2730 if ((*offset >= length) || stop_proc) {
2731 /* no more data left in the mbuf chain */
2735 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2736 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2746 * we need to report rwnd overrun drops.
2748 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2752 * Did we get data? If so, update the time for auto-close and
2753 * give peer credit for being alive.
2755 SCTP_STAT_INCR(sctps_recvpktwithdata);
2756 stcb->asoc.overall_error_count = 0;
2757 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2759 /* now service all of the reassm queue if needed */
2760 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2761 sctp_service_queues(stcb, asoc);
2763 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2765 * Assure that we ack right away by making sure that a d-ack
2766 * timer is running. So the sack_check will send a sack.
2768 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2771 /* Start a sack timer or QUEUE a SACK for sending */
2772 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
2773 (stcb->asoc.first_ack_sent)) {
2774 /* Everything is in order */
2775 if (stcb->asoc.mapping_array[0] == 0xff) {
2776 /* need to do the slide */
2777 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2779 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2780 stcb->asoc.first_ack_sent = 1;
2781 callout_stop(&stcb->asoc.dack_timer.timer);
2782 sctp_send_sack(stcb);
2784 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2785 stcb->sctp_ep, stcb, NULL);
2789 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
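/*
 * Sketch of the "unknown chunk" bit rules applied in the switch above: the
 * two high-order bits of the chunk type say whether the chunk should be
 * reported back to the peer (0x40) and whether the rest of the packet may
 * still be processed (0x80).  The enum and helper names are sketch-local.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

enum unknown_chunk_action {
	STOP_NO_REPORT,		/* 00xxxxxx: discard rest of packet, no report */
	STOP_AND_REPORT,	/* 01xxxxxx: discard rest of packet, report    */
	SKIP_NO_REPORT,		/* 10xxxxxx: skip this chunk, no report        */
	SKIP_AND_REPORT		/* 11xxxxxx: skip this chunk, report           */
};

static enum unknown_chunk_action
classify_unknown_chunk(uint8_t chunk_type)
{
	int report = (chunk_type & 0x40) != 0;
	int skip = (chunk_type & 0x80) != 0;

	if (skip)
		return (report ? SKIP_AND_REPORT : SKIP_NO_REPORT);
	return (report ? STOP_AND_REPORT : STOP_NO_REPORT);
}

int
main(void)
{
	/* 0xC1 has both high bits set -> skip the chunk and queue a report */
	printf("%d\n", classify_unknown_chunk(0xC1));
	return (0);
}
#endif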
2798 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2799 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2800 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, int num_seg, int *ecn_seg_sums)
2802 /************************************************/
2803 /* process fragments and update sendqueue */
2804 /************************************************/
2805 struct sctp_sack *sack;
2806 struct sctp_gap_ack_block *frag;
2807 struct sctp_tmit_chunk *tp1;
2811 #ifdef SCTP_FR_LOGGING
2815 uint16_t frag_strt, frag_end, primary_flag_set;
2816 u_long last_frag_high;
2819 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
2821 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2822 primary_flag_set = 1;
2824 primary_flag_set = 0;
2828 frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
2829 sizeof(struct sctp_sack));
2832 for (i = 0; i < num_seg; i++) {
2833 frag_strt = ntohs(frag->start);
2834 frag_end = ntohs(frag->end);
2835 /* some sanity checks on the fragment offsets */
2836 if (frag_strt > frag_end) {
2837 /* this one is malformed, skip */
2841 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
2843 *biggest_tsn_acked = frag_end + last_tsn;
2845 /* mark acked dgs and find out the highest TSN being acked */
2847 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2849 /* save the locations of the last frags */
2850 last_frag_high = frag_end + last_tsn;
2853 * now let's see if we need to reset the queue due to
2854 * an out-of-order SACK fragment
2856 if (compare_with_wrap(frag_strt + last_tsn,
2857 last_frag_high, MAX_TSN)) {
2859 * if the new frag starts after the last TSN
2860 * frag covered, we are ok and this one is
2861 * beyond the last one
2866 * ok, they have reset us, so we need to
2867 * reset the queue. This will cause extra
2868 * hunting, but hey, they chose the
2869 * performance hit when they failed to order
2872 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2874 last_frag_high = frag_end + last_tsn;
2876 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2878 #ifdef SCTP_FR_LOGGING
2879 if (tp1->rec.data.doing_fast_retransmit)
2884 * CMT: CUCv2 algorithm. For each TSN being
2885 * processed from the sent queue, track the
2886 * next expected pseudo-cumack, or
2887 * rtx_pseudo_cumack, if required. Separate
2888 * cumack trackers for first transmissions,
2889 * and retransmissions.
2891 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2892 (tp1->snd_count == 1)) {
2893 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2894 tp1->whoTo->find_pseudo_cumack = 0;
2896 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2897 (tp1->snd_count > 1)) {
2898 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2899 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2901 if (tp1->rec.data.TSN_seq == j) {
2902 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2904 * must be held until
2908 * ECN Nonce: Add the nonce
2909 * value to the sender's
2912 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2925 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2926 *biggest_newly_acked_tsn, MAX_TSN)) {
2927 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2936 * this_sack_highest_
2940 if (tp1->rec.data.chunk_was_revoked == 0)
2941 tp1->whoTo->saw_newack = 1;
2943 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2944 tp1->whoTo->this_sack_highest_newack,
2946 tp1->whoTo->this_sack_highest_newack =
2947 tp1->rec.data.TSN_seq;
2952 * this_sack_lowest_n
2955 if (*this_sack_lowest_newack == 0) {
2956 #ifdef SCTP_SACK_LOGGING
2957 sctp_log_sack(*this_sack_lowest_newack,
2959 tp1->rec.data.TSN_seq,
2962 SCTP_LOG_TSN_ACKED);
2964 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2969 * (rtx-)pseudo-cumac
2974 * (rtx-)pseudo-cumac
2976 * new_(rtx_)pseudo_c
2984 * (rtx-)pseudo-cumac
2992 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2993 if (tp1->rec.data.chunk_was_revoked == 0) {
2994 tp1->whoTo->new_pseudo_cumack = 1;
2996 tp1->whoTo->find_pseudo_cumack = 1;
2998 #ifdef SCTP_CWND_LOGGING
2999 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3001 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3002 if (tp1->rec.data.chunk_was_revoked == 0) {
3003 tp1->whoTo->new_pseudo_cumack = 1;
3005 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3007 #ifdef SCTP_SACK_LOGGING
3008 sctp_log_sack(*biggest_newly_acked_tsn,
3010 tp1->rec.data.TSN_seq,
3013 SCTP_LOG_TSN_ACKED);
3015 #ifdef SCTP_FLIGHT_LOGGING
3016 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
3017 tp1->whoTo->flight_size,
3020 tp1->rec.data.TSN_seq);
3022 if (tp1->whoTo->flight_size >= tp1->book_size)
3023 tp1->whoTo->flight_size -= tp1->book_size;
3025 tp1->whoTo->flight_size = 0;
3026 if (asoc->total_flight >= tp1->book_size) {
3027 asoc->total_flight -= tp1->book_size;
3028 if (asoc->total_flight_count > 0)
3029 asoc->total_flight_count--;
3031 asoc->total_flight = 0;
3032 asoc->total_flight_count = 0;
3035 tp1->whoTo->net_ack += tp1->send_size;
3037 if (tp1->snd_count < 2) {
3043 tp1->whoTo->net_ack2 += tp1->send_size;
3050 sctp_calculate_rto(stcb,
3053 &tp1->sent_rcv_time);
3054 tp1->whoTo->rto_pending = 0;
3059 if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
3060 tp1->sent != SCTP_DATAGRAM_UNSENT &&
3061 compare_with_wrap(tp1->rec.data.TSN_seq,
3062 asoc->this_sack_highest_gap,
3064 asoc->this_sack_highest_gap =
3065 tp1->rec.data.TSN_seq;
3067 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3068 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3069 #ifdef SCTP_AUDITING_ENABLED
3070 sctp_audit_log(0xB2,
3071 (asoc->sent_queue_retran_cnt & 0x000000ff));
3075 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3076 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3078 tp1->sent = SCTP_DATAGRAM_MARKED;
3081 } /* if (tp1->TSN_seq == j) */
3082 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
3086 tp1 = TAILQ_NEXT(tp1, sctp_next);
3087 } /* end while (tp1) */
3088 } /* end for (j = fragStart */
3089 frag++; /* next one */
3091 #ifdef SCTP_FR_LOGGING
3093 * if (num_frs) sctp_log_fr(*biggest_tsn_acked,
3094 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
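/*
 * Sketch of how a single gap-ack block from the loop above is turned into
 * an absolute TSN range: the start/end fields are offsets relative to the
 * cumulative TSN, so the block acks [cum_ack + start, cum_ack + end], and
 * a block with start > end is malformed and skipped.  Struct and function
 * names are sketch-local.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct gap_block {
	uint16_t start;
	uint16_t end;
};

/* fill [*lo, *hi] for a sane block; return -1 for a malformed one */
static int
gap_block_range(uint32_t cum_ack, const struct gap_block *gb,
    uint32_t *lo, uint32_t *hi)
{
	if (gb->start > gb->end)
		return (-1);
	*lo = cum_ack + gb->start;
	*hi = cum_ack + gb->end;
	return (0);
}

int
main(void)
{
	struct gap_block gb = { 2, 4 };
	uint32_t lo, hi;

	if (gap_block_range(1000, &gb, &lo, &hi) == 0)
		printf("acks %u..%u\n", lo, hi);	/* 1002..1004 */
	return (0);
}
#endif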
3100 sctp_check_for_revoked(struct sctp_association *asoc, uint32_t cumack,
3101 u_long biggest_tsn_acked)
3103 struct sctp_tmit_chunk *tp1;
3104 int tot_revoked = 0;
3106 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3108 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3111 * ok, this guy is either ACKED or MARKED. If it is
3112 * ACKED it has been previously acked but not this
3113 * time, i.e. revoked. If it is MARKED it was ACK'ed
3116 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3117 /* it has been revoked */
3118 tp1->sent = SCTP_DATAGRAM_SENT;
3119 tp1->rec.data.chunk_was_revoked = 1;
3121 * We must add this stuff back in to assure
3122 * timers and such get started.
3124 tp1->whoTo->flight_size += tp1->book_size;
3125 asoc->total_flight_count++;
3126 asoc->total_flight += tp1->book_size;
3128 #ifdef SCTP_SACK_LOGGING
3129 sctp_log_sack(asoc->last_acked_seq,
3131 tp1->rec.data.TSN_seq,
3134 SCTP_LOG_TSN_REVOKED);
3136 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3137 /* it has been re-acked in this SACK */
3138 tp1->sent = SCTP_DATAGRAM_ACKED;
3141 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3143 tp1 = TAILQ_NEXT(tp1, sctp_next);
3145 if (tot_revoked > 0) {
3147 * Set up the ecn nonce re-sync point. We do this since once
3148 * data is revoked we begin to retransmit things, which do
3149 * NOT have the ECN bits set. This means we are now out of
3150 * sync and must wait until we get back in sync with the
3151 * peer to check ECN bits.
3153 tp1 = TAILQ_FIRST(&asoc->send_queue);
3155 asoc->nonce_resync_tsn = asoc->sending_seq;
3157 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3159 asoc->nonce_wait_for_ecne = 0;
3160 asoc->nonce_sum_check = 0;
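/*
 * Sketch of the revocation rule implemented in sctp_check_for_revoked()
 * above: a chunk beyond the new cum-ack that an earlier SACK had ACKED but
 * that this SACK no longer covers has been "revoked" and re-enters flight
 * accounting, while a chunk MARKED by this SACK is promoted to ACKED.  The
 * states and helper are sketch-local stand-ins for tp1->sent handling.
 */
#if 0
#include <stdio.h>

enum chunk_state { CHUNK_SENT, CHUNK_MARKED, CHUNK_ACKED };

/* returns 1 if the chunk was revoked and must count as in flight again */
static int
apply_revocation_rule(enum chunk_state *state)
{
	if (*state == CHUNK_ACKED) {
		*state = CHUNK_SENT;	/* revoked: back in flight */
		return (1);
	}
	if (*state == CHUNK_MARKED)
		*state = CHUNK_ACKED;	/* re-acked by this SACK */
	return (0);
}

int
main(void)
{
	enum chunk_state s = CHUNK_ACKED;

	printf("revoked=%d new_state=%d\n", apply_revocation_rule(&s), s);
	return (0);
}
#endif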
3164 extern int sctp_peer_chunk_oh;
3167 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3168 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3170 struct sctp_tmit_chunk *tp1;
3171 int strike_flag = 0;
3173 int tot_retrans = 0;
3174 uint32_t sending_seq;
3175 struct sctp_nets *net;
3176 int num_dests_sacked = 0;
3179 * select the sending_seq, this is either the next thing ready to be
3180 * sent but not transmitted, OR, the next seq we assign.
3182 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3184 sending_seq = asoc->sending_seq;
3186 sending_seq = tp1->rec.data.TSN_seq;
3189 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3190 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3191 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3192 if (net->saw_newack)
3196 if (stcb->asoc.peer_supports_prsctp) {
3197 SCTP_GETTIME_TIMEVAL(&now);
3199 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3202 if (tp1->no_fr_allowed) {
3203 /* this one had a timeout or something */
3204 tp1 = TAILQ_NEXT(tp1, sctp_next);
3207 #ifdef SCTP_FR_LOGGING
3208 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3209 sctp_log_fr(biggest_tsn_newly_acked,
3210 tp1->rec.data.TSN_seq,
3212 SCTP_FR_LOG_CHECK_STRIKE);
3214 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3216 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3220 if (stcb->asoc.peer_supports_prsctp) {
3221 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3222 /* Is it expired? */
3223 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3224 /* Yes so drop it */
3225 if (tp1->data != NULL) {
3226 sctp_release_pr_sctp_chunk(stcb, tp1,
3227 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3230 tp1 = TAILQ_NEXT(tp1, sctp_next);
3234 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3235 /* Has it been retransmitted tv_sec times? */
3236 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3237 /* Yes, so drop it */
3238 if (tp1->data != NULL) {
3239 sctp_release_pr_sctp_chunk(stcb, tp1,
3240 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3243 tp1 = TAILQ_NEXT(tp1, sctp_next);
3248 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3249 asoc->this_sack_highest_gap, MAX_TSN)) {
3250 /* we are beyond the tsn in the sack */
3253 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3254 /* either a RESEND, ACKED, or MARKED */
3256 tp1 = TAILQ_NEXT(tp1, sctp_next);
3260 * CMT : SFR algo (covers part of DAC and HTNA as well)
3262 if (tp1->whoTo->saw_newack == 0) {
3264 * No new acks were received for data sent to this
3265 * dest. Therefore, according to the SFR algo for
3266 * CMT, no data sent to this dest can be marked for
3267 * FR using this SACK. (iyengar@cis.udel.edu,
3270 tp1 = TAILQ_NEXT(tp1, sctp_next);
3272 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3273 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3275 * CMT: New acks were received for data sent to
3276 * this dest. But no new acks were seen for data
3277 * sent after tp1. Therefore, according to the SFR
3278 * algo for CMT, tp1 cannot be marked for FR using
3279 * this SACK. This step covers part of the DAC algo
3280 * and the HTNA algo as well.
3282 tp1 = TAILQ_NEXT(tp1, sctp_next);
3286 * Here we check to see if we have already done a FR
3287 * and if so we see if the biggest TSN we saw in the sack is
3288 * smaller than the recovery point. If so we don't strike
3289 * the tsn... otherwise we CAN strike the TSN.
3292 * @@@ JRI: Check for CMT
3294 if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3296 * Strike the TSN if in fast-recovery and cum-ack
3299 #ifdef SCTP_FR_LOGGING
3300 sctp_log_fr(biggest_tsn_newly_acked,
3301 tp1->rec.data.TSN_seq,
3303 SCTP_FR_LOG_STRIKE_CHUNK);
3306 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3308 * CMT DAC algorithm: If SACK flag is set to
3309 * 0, then lowest_newack test will not pass
3310 * because it would have been set to the
3311 * cumack earlier. If not already to be
3312 * rtx'd, If not a mixed sack and if tp1 is
3313 * not between two sacked TSNs, then mark by
3316 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3317 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3318 #ifdef SCTP_FR_LOGGING
3319 sctp_log_fr(16 + num_dests_sacked,
3320 tp1->rec.data.TSN_seq,
3322 SCTP_FR_LOG_STRIKE_CHUNK);
3327 } else if (tp1->rec.data.doing_fast_retransmit) {
3329 * For those that have done a FR we must take
3330 * special consideration if we strike. I.e., the
3331 * biggest_newly_acked must be higher than the
3332 * sending_seq at the time we did the FR.
3334 #ifdef SCTP_FR_TO_ALTERNATE
3336 * If FR's go to new networks, then we must only do
3337 * this for singly homed asoc's. However if the FR's
3338 * go to the same network (Armando's work) then it's
3339 * ok to FR multiple times.
3341 if (asoc->numnets < 2)
3346 if ((compare_with_wrap(biggest_tsn_newly_acked,
3347 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3348 (biggest_tsn_newly_acked ==
3349 tp1->rec.data.fast_retran_tsn)) {
3351 * Strike the TSN, since this ack is
3352 * beyond where things were when we
3355 #ifdef SCTP_FR_LOGGING
3356 sctp_log_fr(biggest_tsn_newly_acked,
3357 tp1->rec.data.TSN_seq,
3359 SCTP_FR_LOG_STRIKE_CHUNK);
3363 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3365 * CMT DAC algorithm: If
3366 * SACK flag is set to 0,
3367 * then lowest_newack test
3368 * will not pass because it
3369 * would have been set to
3370 * the cumack earlier. If
3371 * not already to be rtx'd,
3372 * If not a mixed sack and
3373 * if tp1 is not between two
3374 * sacked TSNs, then mark by
3377 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3378 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3379 #ifdef SCTP_FR_LOGGING
3380 sctp_log_fr(32 + num_dests_sacked,
3381 tp1->rec.data.TSN_seq,
3383 SCTP_FR_LOG_STRIKE_CHUNK);
3391 * @@@ JRI: TODO: remove code for HTNA algo. CMT's
3392 * SFR algo covers HTNA.
3394 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3395 biggest_tsn_newly_acked, MAX_TSN)) {
3397 * We don't strike these: This is the HTNA
3398 * algorithm, i.e. we don't strike if our TSN is
3399 * larger than the Highest TSN Newly Acked.
3403 /* Strike the TSN */
3404 #ifdef SCTP_FR_LOGGING
3405 sctp_log_fr(biggest_tsn_newly_acked,
3406 tp1->rec.data.TSN_seq,
3408 SCTP_FR_LOG_STRIKE_CHUNK);
3411 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3413 * CMT DAC algorithm: If SACK flag is set to
3414 * 0, then lowest_newack test will not pass
3415 * because it would have been set to the
3416 * cumack earlier. If not already to be
3417 * rtx'd, If not a mixed sack and if tp1 is
3418 * not between two sacked TSNs, then mark by
3421 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3422 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3423 #ifdef SCTP_FR_LOGGING
3424 sctp_log_fr(48 + num_dests_sacked,
3425 tp1->rec.data.TSN_seq,
3427 SCTP_FR_LOG_STRIKE_CHUNK);
3433 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3434 /* Increment the count to resend */
3435 struct sctp_nets *alt;
3437 /* printf("OK, we are now ready to FR this guy\n"); */
3438 #ifdef SCTP_FR_LOGGING
3439 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3443 /* This is a subsequent FR */
3444 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3446 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3448 if (sctp_cmt_on_off) {
3450 * CMT: Using RTX_SSTHRESH policy for CMT.
3451 * If CMT is being used, then pick dest with
3452 * largest ssthresh for any retransmission.
3453 * (iyengar@cis.udel.edu, 2005/08/12)
3455 tp1->no_fr_allowed = 1;
3457 alt = sctp_find_alternate_net(stcb, alt, 1);
3459 * CUCv2: If a different dest is picked for
3460 * the retransmission, then new
3461 * (rtx-)pseudo_cumack needs to be tracked
3462 * for orig dest. Let CUCv2 track new (rtx-)
3463 * pseudo-cumack always.
3465 tp1->whoTo->find_pseudo_cumack = 1;
3466 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3469 } else {/* CMT is OFF */
3471 #ifdef SCTP_FR_TO_ALTERNATE
3472 /* Can we find an alternate? */
3473 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3476 * default behavior is to NOT retransmit
3477 * FR's to an alternate. Armando Caro's
3478 * paper details why.
3484 tp1->rec.data.doing_fast_retransmit = 1;
3486 /* mark the sending seq for possible subsequent FR's */
3488 * printf("Marking TSN for FR new value %x\n",
3489 * (uint32_t)tpi->rec.data.TSN_seq);
3491 if (TAILQ_EMPTY(&asoc->send_queue)) {
3493 * If the send queue is empty then it's
3494 * the next sequence number that will be
3495 * assigned so we subtract one from this to
3496 * get the one we last sent.
3498 tp1->rec.data.fast_retran_tsn = sending_seq;
3501 * If there are chunks on the send queue
3502 * (unsent data that has made it from the
3503 * stream queues but not out the door), we
3504 * take the first one (which will have the
3505 * lowest TSN) and subtract one to get the
3508 struct sctp_tmit_chunk *ttt;
3510 ttt = TAILQ_FIRST(&asoc->send_queue);
3511 tp1->rec.data.fast_retran_tsn =
3512 ttt->rec.data.TSN_seq;
3517 * this guy had an RTO calculation pending on
3520 tp1->whoTo->rto_pending = 0;
3523 /* fix counts and things */
3524 #ifdef SCTP_FLIGHT_LOGGING
3525 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
3526 tp1->whoTo->flight_size,
3529 tp1->rec.data.TSN_seq);
3531 tp1->whoTo->net_ack++;
3532 if (tp1->whoTo->flight_size >= tp1->book_size)
3533 tp1->whoTo->flight_size -= tp1->book_size;
3535 tp1->whoTo->flight_size = 0;
3537 #ifdef SCTP_LOG_RWND
3538 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3539 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
3541 /* add back to the rwnd */
3542 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3544 /* remove from the total flight */
3545 if (asoc->total_flight >= tp1->book_size) {
3546 asoc->total_flight -= tp1->book_size;
3547 if (asoc->total_flight_count > 0)
3548 asoc->total_flight_count--;
3550 asoc->total_flight = 0;
3551 asoc->total_flight_count = 0;
3555 if (alt != tp1->whoTo) {
3556 /* yes, there is an alternate. */
3557 sctp_free_remote_addr(tp1->whoTo);
3559 atomic_add_int(&alt->ref_count, 1);
3562 tp1 = TAILQ_NEXT(tp1, sctp_next);
3565 if (tot_retrans > 0) {
3567 * Set up the ecn nonce re-sync point. We do this since once
3568 * we go to FR something we introduce a Karn's rule scenario
3569 * and won't know the totals for the ECN bits.
3571 asoc->nonce_resync_tsn = sending_seq;
3572 asoc->nonce_wait_for_ecne = 0;
3573 asoc->nonce_sum_check = 0;
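/*
 * Sketch of the "subsequent FR" gate used in the strike logic above: once
 * a chunk has been fast-retransmitted, fast_retran_tsn records where the
 * sending sequence stood, and the chunk may be struck again only when the
 * biggest newly acked TSN has reached or passed that recovery point.  The
 * wrap-safe comparison is modelled with 32-bit serial arithmetic, an
 * assumption about compare_with_wrap()'s semantics.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int
tsn_geq(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) >= 0);
}

/* may a chunk that already did a fast retransmit be struck again? */
static int
may_strike_again(uint32_t biggest_tsn_newly_acked, uint32_t fast_retran_tsn)
{
	return (tsn_geq(biggest_tsn_newly_acked, fast_retran_tsn));
}

int
main(void)
{
	/* recovery point 500: an ack of 499 does not allow a new strike */
	printf("%d %d\n", may_strike_again(499, 500),
	    may_strike_again(501, 500));
	return (0);
}
#endif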
3577 struct sctp_tmit_chunk *
3578 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3579 struct sctp_association *asoc)
3581 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3585 if (asoc->peer_supports_prsctp == 0) {
3588 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3590 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3591 tp1->sent != SCTP_DATAGRAM_RESEND) {
3592 /* no chance to advance, out of here */
3595 if (!PR_SCTP_ENABLED(tp1->flags)) {
3597 * We can't fwd-tsn past any that are reliable aka
3598 * retransmitted until the asoc fails.
3603 SCTP_GETTIME_TIMEVAL(&now);
3606 tp2 = TAILQ_NEXT(tp1, sctp_next);
3608 * now we got a chunk which is marked for another
3609 * retransmission to a PR-stream but has run out its chances
3610 * already maybe OR has been marked to skip now. Can we skip
3611 * it if it's a resend?
3613 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3614 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3616 * Now is this one marked for resend and its time is
3619 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3620 /* Yes so drop it */
3622 sctp_release_pr_sctp_chunk(stcb, tp1,
3623 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3628 * No, we are done when we hit one for resend
3629 * whose time has not expired.
3635 * Ok now if this chunk is marked to drop it we can clean up
3636 * the chunk, advance our peer ack point and we can check
3639 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3640 /* advance: the PeerAckPoint goes forward */
3641 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3644 * we don't want to de-queue it here. Just wait for
3645 * the next peer SACK to come with a new cumTSN and
3646 * then the chunk will be dropped in the normal
3650 sctp_free_bufspace(stcb, asoc, tp1, 1);
3652 * Maybe there should be another
3655 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3656 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3658 sctp_m_freem(tp1->data);
3660 if (stcb->sctp_socket) {
3661 sctp_sowwakeup(stcb->sctp_ep,
3663 #ifdef SCTP_WAKE_LOGGING
3664 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
3670 * If it is still in RESEND we can advance no
3676 * If we hit here we just dumped tp1, move to next tsn on
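/*
 * Sketch of how the PR-SCTP advanced peer-ack point moves in
 * sctp_try_advance_peer_ack_point() above: starting from the cumulative
 * TSN, the ack point slides across consecutive chunks already marked
 * FORWARD_TSN_SKIP and stops at the first chunk that must still be
 * delivered reliably.  The flag-array model below is a simplification of
 * the kernel's sent-queue walk.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
advance_ack_point(uint32_t cum_tsn, const int *skippable, int nchunks)
{
	uint32_t adv = cum_tsn;
	int i;

	for (i = 0; i < nchunks; i++) {
		if (!skippable[i])
			break;		/* reliable chunk: cannot pass it */
		adv = cum_tsn + 1 + i;	/* chunk i carries TSN cum_tsn+1+i */
	}
	return (adv);
}

int
main(void)
{
	int flags[] = { 1, 1, 0, 1 };	/* TSNs 101,102 abandoned, 103 is not */

	printf("%u\n", advance_ack_point(100, flags, 4));	/* prints 102 */
	return (0);
}
#endif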
3684 #ifdef SCTP_HIGH_SPEED
3685 struct sctp_hs_raise_drop {
3688 int32_t drop_percent;
3691 #define SCTP_HS_TABLE_SIZE 73
3693 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3694 {38, 1, 50}, /* 0 */
3695 {118, 2, 44}, /* 1 */
3696 {221, 3, 41}, /* 2 */
3697 {347, 4, 38}, /* 3 */
3698 {495, 5, 37}, /* 4 */
3699 {663, 6, 35}, /* 5 */
3700 {851, 7, 34}, /* 6 */
3701 {1058, 8, 33}, /* 7 */
3702 {1284, 9, 32}, /* 8 */
3703 {1529, 10, 31}, /* 9 */
3704 {1793, 11, 30}, /* 10 */
3705 {2076, 12, 29}, /* 11 */
3706 {2378, 13, 28}, /* 12 */
3707 {2699, 14, 28}, /* 13 */
3708 {3039, 15, 27}, /* 14 */
3709 {3399, 16, 27}, /* 15 */
3710 {3778, 17, 26}, /* 16 */
3711 {4177, 18, 26}, /* 17 */
3712 {4596, 19, 25}, /* 18 */
3713 {5036, 20, 25}, /* 19 */
3714 {5497, 21, 24}, /* 20 */
3715 {5979, 22, 24}, /* 21 */
3716 {6483, 23, 23}, /* 22 */
3717 {7009, 24, 23}, /* 23 */
3718 {7558, 25, 22}, /* 24 */
3719 {8130, 26, 22}, /* 25 */
3720 {8726, 27, 22}, /* 26 */
3721 {9346, 28, 21}, /* 27 */
3722 {9991, 29, 21}, /* 28 */
3723 {10661, 30, 21}, /* 29 */
3724 {11358, 31, 20}, /* 30 */
3725 {12082, 32, 20}, /* 31 */
3726 {12834, 33, 20}, /* 32 */
3727 {13614, 34, 19}, /* 33 */
3728 {14424, 35, 19}, /* 34 */
3729 {15265, 36, 19}, /* 35 */
3730 {16137, 37, 19}, /* 36 */
3731 {17042, 38, 18}, /* 37 */
3732 {17981, 39, 18}, /* 38 */
3733 {18955, 40, 18}, /* 39 */
3734 {19965, 41, 17}, /* 40 */
3735 {21013, 42, 17}, /* 41 */
3736 {22101, 43, 17}, /* 42 */
3737 {23230, 44, 17}, /* 43 */
3738 {24402, 45, 16}, /* 44 */
3739 {25618, 46, 16}, /* 45 */
3740 {26881, 47, 16}, /* 46 */
3741 {28193, 48, 16}, /* 47 */
3742 {29557, 49, 15}, /* 48 */
3743 {30975, 50, 15}, /* 49 */
3744 {32450, 51, 15}, /* 50 */
3745 {33986, 52, 15}, /* 51 */
3746 {35586, 53, 14}, /* 52 */
3747 {37253, 54, 14}, /* 53 */
3748 {38992, 55, 14}, /* 54 */
3749 {40808, 56, 14}, /* 55 */
3750 {42707, 57, 13}, /* 56 */
3751 {44694, 58, 13}, /* 57 */
3752 {46776, 59, 13}, /* 58 */
3753 {48961, 60, 13}, /* 59 */
3754 {51258, 61, 13}, /* 60 */
3755 {53677, 62, 12}, /* 61 */
3756 {56230, 63, 12}, /* 62 */
3757 {58932, 64, 12}, /* 63 */
3758 {61799, 65, 12}, /* 64 */
3759 {64851, 66, 11}, /* 65 */
3760 {68113, 67, 11}, /* 66 */
3761 {71617, 68, 11}, /* 67 */
3762 {75401, 69, 10}, /* 68 */
3763 {79517, 70, 10}, /* 69 */
3764 {84035, 71, 10}, /* 70 */
3765 {89053, 72, 10}, /* 71 */
3766 {94717, 73, 9} /* 72 */
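/*
 * Sketch of the table lookup done by sctp_hs_cwnd_increase() below: the
 * current cwnd is expressed in kilobytes (cwnd >> 10), the row with the
 * largest threshold not above that value is chosen, and that row's
 * "increase" column (shifted back up by 10 bits) is added to cwnd.  Only
 * the first few rows are reproduced, the scan is a simplified linear
 * search, and the kernel's fallback to the standard increase below the
 * first threshold is omitted.
 */
#if 0
#include <stdio.h>

struct hs_row {
	int cwnd_kb;		/* threshold, in KB */
	int increase;		/* increase applied, shifted up by 10 bits */
};

static const struct hs_row hs_rows[] = {
	{ 38, 1 }, { 118, 2 }, { 221, 3 }, { 347, 4 }, { 495, 5 },
};

static int
hs_increase_bytes(int cwnd_bytes)
{
	int cur_kb = cwnd_bytes >> 10;
	int i, row = 0;

	for (i = 0; i < (int)(sizeof(hs_rows) / sizeof(hs_rows[0])); i++) {
		if (cur_kb < hs_rows[i].cwnd_kb)
			break;
		row = i;
	}
	return (hs_rows[row].increase << 10);
}

int
main(void)
{
	/* a 150 KB cwnd falls in the second row -> grow by 2 KB (2048 bytes) */
	printf("%d\n", hs_increase_bytes(150 << 10));
	return (0);
}
#endif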
3770 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
3772 int cur_val, i, indx, incr;
3774 cur_val = net->cwnd >> 10;
3775 indx = SCTP_HS_TABLE_SIZE - 1;
3777 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3779 if (net->net_ack > net->mtu) {
3780 net->cwnd += net->mtu;
3781 #ifdef SCTP_CWND_MONITOR
3782 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3785 net->cwnd += net->net_ack;
3786 #ifdef SCTP_CWND_MONITOR
3787 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3791 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
3792 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3797 net->last_hs_used = indx;
3798 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3800 #ifdef SCTP_CWND_MONITOR
3801 sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
3807 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
3809 int cur_val, i, indx;
3811 #ifdef SCTP_CWND_MONITOR
3812 int old_cwnd = net->cwnd;
3816 cur_val = net->cwnd >> 10;
3817 indx = net->last_hs_used;
3818 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3820 net->ssthresh = net->cwnd / 2;
3821 if (net->ssthresh < (net->mtu * 2)) {
3822 net->ssthresh = 2 * net->mtu;
3824 net->cwnd = net->ssthresh;
3826 /* drop by the proper amount */
3827 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3828 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3829 net->cwnd = net->ssthresh;
3830 /* now where are we */
3831 indx = net->last_hs_used;
3832 cur_val = net->cwnd >> 10;
3833 /* reset where we are in the table */
3834 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3835 /* fell out of hs */
3836 net->last_hs_used = 0;
3838 for (i = indx; i >= 1; i--) {
3839 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3843 net->last_hs_used = indx;
3846 #ifdef SCTP_CWND_MONITOR
3847 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
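/*
 * Sketch of the loss response in sctp_hs_cwnd_decrease() above: below the
 * first table threshold the window is simply halved with a floor of two
 * MTUs; inside the HighSpeed region, cwnd is instead reduced by the
 * drop_percent of the row currently in use, so a large window backs off
 * by a smaller fraction.  Inputs are sketch values only.
 */
#if 0
#include <stdio.h>

static int
hs_cwnd_after_loss(int cwnd, int mtu, int in_hs_region, int drop_percent)
{
	int ssthresh;

	if (!in_hs_region) {
		ssthresh = cwnd / 2;
		if (ssthresh < 2 * mtu)
			ssthresh = 2 * mtu;
	} else {
		ssthresh = cwnd - (cwnd / 100) * drop_percent;
	}
	return (ssthresh);	/* cwnd is then set to ssthresh */
}

int
main(void)
{
	/* a 200 KB window with a 33% drop keeps roughly 134 KB */
	printf("%d\n", hs_cwnd_after_loss(200 * 1024, 1500, 1, 33));
	return (0);
}
#endif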
3854 extern int sctp_early_fr;
3855 extern int sctp_L2_abc_variable;
3858 static __inline void
3859 sctp_cwnd_update(struct sctp_tcb *stcb,
3860 struct sctp_association *asoc,
3861 int accum_moved, int reneged_all, int will_exit)
3863 struct sctp_nets *net;
3865 /******************************/
3866 /* update cwnd and Early FR */
3867 /******************************/
3868 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3869 #ifdef JANA_CODE_WHY_THIS
3871 * CMT fast recovery code. Need to debug.
3873 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
3874 if (compare_with_wrap(asoc->last_acked_seq,
3875 net->fast_recovery_tsn, MAX_TSN) ||
3876 (asoc->last_acked_seq == net->fast_recovery_tsn) ||
3877 compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
3878 (net->pseudo_cumack == net->fast_recovery_tsn)) {
3879 net->will_exit_fast_recovery = 1;
3883 if (sctp_early_fr) {
3885 * So, first of all, do we need to have an Early FR
3888 if (((TAILQ_FIRST(&asoc->sent_queue)) &&
3889 (net->ref_count > 1) &&
3890 (net->flight_size < net->cwnd)) ||
3893 * yes, so in this case stop it if it's
3894 * running, and then restart it. Reneging
3895 * all is a special case where we want to
3896 * run the Early FR timer and then force the
3897 * last few unacked to be sent, causing us
3898 * to elicit a sack with gaps to force out
3901 if (callout_pending(&net->fr_timer.timer)) {
3902 SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
3903 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
3904 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
3906 SCTP_STAT_INCR(sctps_earlyfrstrid);
3907 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
3909 /* No, stop it if it's running */
3910 if (callout_pending(&net->fr_timer.timer)) {
3911 SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
3912 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
3913 SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
3917 /* if nothing was acked on this destination skip it */
3918 if (net->net_ack == 0) {
3919 #ifdef SCTP_CWND_LOGGING
3920 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
3924 if (net->net_ack2 > 0) {
3926 * Karn's rule applies to clearing error count, this
3929 net->error_count = 0;
3930 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
3931 SCTP_ADDR_NOT_REACHABLE) {
3932 /* addr came good */
3933 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3934 net->dest_state |= SCTP_ADDR_REACHABLE;
3935 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3936 SCTP_RECEIVED_SACK, (void *)net);
3937 /* now was it the primary? if so restore */
3938 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3939 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3943 #ifdef JANA_CODE_WHY_THIS
3945 * Cannot skip for CMT. Need to come back and check these
3946 * variables for CMT. CMT fast recovery code. Need to debug.
3948 if (sctp_cmt_on_off == 1 &&
3949 net->fast_retran_loss_recovery &&
3950 net->will_exit_fast_recovery == 0)
3952 if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
3954 * If we are in loss recovery we skip any
3957 goto skip_cwnd_update;
3960 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
3963 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
3964 /* If the cumulative ack moved we can proceed */
3965 if (net->cwnd <= net->ssthresh) {
3966 /* We are in slow start */
3967 if (net->flight_size + net->net_ack >=
3969 #ifdef SCTP_HIGH_SPEED
3970 sctp_hs_cwnd_increase(stcb, net);
3972 if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
3973 net->cwnd += (net->mtu * sctp_L2_abc_variable);
3974 #ifdef SCTP_CWND_MONITOR
3975 sctp_log_cwnd(stcb, net, net->mtu,
3976 SCTP_CWND_LOG_FROM_SS);
3980 net->cwnd += net->net_ack;
3981 #ifdef SCTP_CWND_MONITOR
3982 sctp_log_cwnd(stcb, net, net->net_ack,
3983 SCTP_CWND_LOG_FROM_SS);
3991 dif = net->cwnd - (net->flight_size +
3993 #ifdef SCTP_CWND_LOGGING
3994 sctp_log_cwnd(stcb, net, net->net_ack,
3995 SCTP_CWND_LOG_NOADV_SS);
3999 /* We are in congestion avoidance */
4000 if (net->flight_size + net->net_ack >=
4003 * add to pba only if we had a
4004 * cwnd's worth (or so) in flight OR
4005 * the burst limit was applied.
4007 net->partial_bytes_acked +=
4011 * Do we need to increase (if pba is
4014 if (net->partial_bytes_acked >=
4017 net->partial_bytes_acked) {
4018 net->partial_bytes_acked -=
4021 net->partial_bytes_acked =
4024 net->cwnd += net->mtu;
4025 #ifdef SCTP_CWND_MONITOR
4026 sctp_log_cwnd(stcb, net, net->mtu,
4027 SCTP_CWND_LOG_FROM_CA);
4030 #ifdef SCTP_CWND_LOGGING
4032 sctp_log_cwnd(stcb, net, net->net_ack,
4033 SCTP_CWND_LOG_NOADV_CA);
4039 #ifdef SCTP_CWND_LOGGING
4040 sctp_log_cwnd(stcb, net, net->net_ack,
4041 SCTP_CWND_LOG_NOADV_CA);
4043 dif = net->cwnd - (net->flight_size +
4048 #ifdef SCTP_CWND_LOGGING
4049 sctp_log_cwnd(stcb, net, net->mtu,
4050 SCTP_CWND_LOG_NO_CUMACK);
4055 * NOW, according to Karn's rule do we need to restore the
4056 * RTO timer back? Check our net_ack2. If not set then we
4057 * have an ambiguity.. i.e. all data ack'd was sent to more
4060 if (net->net_ack2) {
4061 /* restore any doubled timers */
4062 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4063 if (net->RTO < stcb->asoc.minrto) {
4064 net->RTO = stcb->asoc.minrto;
4066 if (net->RTO > stcb->asoc.maxrto) {
4067 net->RTO = stcb->asoc.maxrto;
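/*
 * Sketch of the per-destination cwnd update done in sctp_cwnd_update()
 * above when the cumulative ack advanced and the window was actually in
 * use (flight_size + net_ack >= cwnd): slow start grows by the acked
 * bytes capped at L*MTU (Appropriate Byte Counting, with L taken from
 * sctp_L2_abc_variable); congestion avoidance grows by one MTU per
 * cwnd's worth of acked bytes via partial_bytes_acked.  Plain ints stand
 * in for the sctp_nets fields.
 */
#if 0
#include <stdio.h>

struct mini_net {
	int cwnd, ssthresh, mtu, partial_bytes_acked;
};

static void
cwnd_update_on_ack(struct mini_net *net, int flight_size, int net_ack, int L)
{
	if (flight_size + net_ack < net->cwnd)
		return;			/* window not fully used: no growth */
	if (net->cwnd <= net->ssthresh) {
		/* slow start: byte counting, capped at L * MTU per SACK */
		net->cwnd += (net_ack > L * net->mtu) ? L * net->mtu : net_ack;
	} else {
		/* congestion avoidance: one MTU per cwnd of acked data */
		net->partial_bytes_acked += net_ack;
		if (net->partial_bytes_acked >= net->cwnd) {
			net->partial_bytes_acked -= net->cwnd;
			net->cwnd += net->mtu;
		}
	}
}

int
main(void)
{
	struct mini_net n = { 4500, 3000, 1500, 0 };	/* already past ssthresh */

	cwnd_update_on_ack(&n, 4500, 3000, 2);
	cwnd_update_on_ack(&n, 4500, 3000, 2);
	printf("cwnd=%d pba=%d\n", n.cwnd, n.partial_bytes_acked);
	return (0);
}
#endif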
4075 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4076 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4078 struct sctp_nets *net;
4079 struct sctp_association *asoc;
4080 struct sctp_tmit_chunk *tp1, *tp2;
4083 SCTP_TCB_LOCK_ASSERT(stcb);
4085 /* First setup for CC stuff */
4086 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4087 net->prev_cwnd = net->cwnd;
4091 asoc->this_sack_highest_gap = cumack;
4092 stcb->asoc.overall_error_count = 0;
4093 /* process the new consecutive TSN first */
4094 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4096 tp2 = TAILQ_NEXT(tp1, sctp_next);
4097 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4099 cumack == tp1->rec.data.TSN_seq) {
4100 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4102 * ECN Nonce: Add the nonce to the sender's
4105 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4106 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4108 * If it is less than ACKED, it is
4109 * now no longer in flight. Higher
4110 * values may occur during marking
4112 #ifdef SCTP_FLIGHT_LOGGING
4113 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
4114 tp1->whoTo->flight_size,
4117 tp1->rec.data.TSN_seq);
4120 if (tp1->whoTo->flight_size >= tp1->book_size) {
4121 tp1->whoTo->flight_size -= tp1->book_size;
4123 tp1->whoTo->flight_size = 0;
4125 if (asoc->total_flight >= tp1->book_size) {
4126 asoc->total_flight -= tp1->book_size;
4127 if (asoc->total_flight_count > 0)
4128 asoc->total_flight_count--;
4130 asoc->total_flight = 0;
4131 asoc->total_flight_count = 0;
4133 tp1->whoTo->net_ack += tp1->send_size;
4134 if (tp1->snd_count < 2) {
4136 * True non-retransmitted
4139 tp1->whoTo->net_ack2 +=
4142 /* update RTO too? */
4143 if ((tp1->do_rtt) && (tp1->whoTo->rto_pending)) {
4145 sctp_calculate_rto(stcb,
4147 &tp1->sent_rcv_time);
4148 tp1->whoTo->rto_pending = 0;
4152 #ifdef SCTP_CWND_LOGGING
4153 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4156 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4157 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4159 tp1->sent = SCTP_DATAGRAM_ACKED;
4164 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4166 sctp_free_bufspace(stcb, asoc, tp1, 1);
4167 sctp_m_freem(tp1->data);
4169 #ifdef SCTP_SACK_LOGGING
4170 sctp_log_sack(asoc->last_acked_seq,
4172 tp1->rec.data.TSN_seq,
4175 SCTP_LOG_FREE_SENT);
4178 asoc->sent_queue_cnt--;
4179 sctp_free_remote_addr(tp1->whoTo);
4180 sctp_free_a_chunk(stcb, tp1);
4183 if (stcb->sctp_socket) {
4184 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4185 #ifdef SCTP_WAKE_LOGGING
4186 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4188 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4189 #ifdef SCTP_WAKE_LOGGING
4191 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4195 if (asoc->last_acked_seq != cumack)
4196 sctp_cwnd_update(stcb, asoc, 1, 0, 0);
4197 asoc->last_acked_seq = cumack;
4198 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4199 /* nothing left in-flight */
4200 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4201 net->flight_size = 0;
4202 net->partial_bytes_acked = 0;
4204 asoc->total_flight = 0;
4205 asoc->total_flight_count = 0;
4207 /* Fix up the a-p-a-p for future PR-SCTP sends */
4208 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4209 asoc->advanced_peer_ack_point = cumack;
4211 /* ECN Nonce updates */
4212 if (asoc->ecn_nonce_allowed) {
4213 if (asoc->nonce_sum_check) {
4214 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4215 if (asoc->nonce_wait_for_ecne == 0) {
4216 struct sctp_tmit_chunk *lchk;
4218 lchk = TAILQ_FIRST(&asoc->send_queue);
4219 asoc->nonce_wait_for_ecne = 1;
4221 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4223 asoc->nonce_wait_tsn = asoc->sending_seq;
4226 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4227 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4229 * Misbehaving peer. We need
4230 * to react to this guy
4232 asoc->ecn_allowed = 0;
4233 asoc->ecn_nonce_allowed = 0;
4238 /* See if Resynchronization Possible */
4239 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4240 asoc->nonce_sum_check = 1;
4242 * now we must calculate what the base is.
4243 * We do this based on two things, we know
4244 * the totals for all the segments
4245 * gap-acked in the SACK (none). We also
4246 * know the SACK's nonce sum, it's in
4247 * nonce_sum_flag. So we can build a truth
4248 * table to back-calculate the new value of
4249 * asoc->nonce_sum_expect_base:
4251 * SACK-flag-Value Seg-Sums Base 0 0 0
4255 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4260 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4261 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4262 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4263 /* SWS sender side engages */
4264 asoc->peers_rwnd = 0;
4266 /* Now assure a timer is running wherever data is queued */
4269 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4270 if (net->flight_size) {
4273 if (net->RTO == 0) {
4274 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4276 to_ticks = MSEC_TO_TICKS(net->RTO);
4279 callout_reset(&net->rxt_timer.timer, to_ticks,
4280 sctp_timeout_handler, &net->rxt_timer);
4282 if (callout_pending(&net->rxt_timer.timer)) {
4283 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4285 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4287 if (sctp_early_fr) {
4288 if (callout_pending(&net->fr_timer.timer)) {
4289 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4290 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4291 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4296 if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) {
4297 /* huh, this should not happen */
4299 panic("Flight size incorrect? fixing??");
4301 printf("Flight size incorrect? fixing\n");
4302 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4303 net->flight_size = 0;
4305 asoc->total_flight = 0;
4306 asoc->total_flight_count = 0;
4307 asoc->sent_queue_retran_cnt = 0;
4308 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4309 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4310 tp1->whoTo->flight_size += tp1->book_size;
4311 asoc->total_flight += tp1->book_size;
4312 asoc->total_flight_count++;
4313 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4314 asoc->sent_queue_retran_cnt++;
4320 /**********************************/
4321 /* Now what about shutdown issues */
4322 /**********************************/
4323 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4324 /* nothing left on sendqueue.. consider done */
4326 if ((asoc->stream_queue_cnt == 1) &&
4327 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4328 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4329 (asoc->locked_on_sending)
4331 struct sctp_stream_queue_pending *sp;
4334 * I may be in a state where we got all across.. but
4335 * cannot write more due to a shutdown... we abort
4336 * since the user did not indicate EOR in this case.
4337 * The sp will be cleaned during free of the asoc.
4339 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4341 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
4342 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4343 asoc->locked_on_sending = NULL;
4344 asoc->stream_queue_cnt--;
4347 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4348 (asoc->stream_queue_cnt == 0)) {
4349 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4350 /* Need to abort here */
4356 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4357 0, M_DONTWAIT, 1, MT_DATA);
4359 struct sctp_paramhdr *ph;
4362 oper->m_len = sizeof(struct sctp_paramhdr) +
4364 ph = mtod(oper, struct sctp_paramhdr *);
4365 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4366 ph->param_length = htons(oper->m_len);
4367 ippp = (uint32_t *) (ph + 1);
4368 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4370 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4371 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
4373 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4374 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4375 sctp_stop_timers_for_shutdown(stcb);
4376 sctp_send_shutdown(stcb,
4377 stcb->asoc.primary_destination);
4378 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4379 stcb->sctp_ep, stcb, asoc->primary_destination);
4380 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4381 stcb->sctp_ep, stcb, asoc->primary_destination);
4383 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4384 (asoc->stream_queue_cnt == 0)) {
4385 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4388 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4389 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4390 sctp_send_shutdown_ack(stcb,
4391 stcb->asoc.primary_destination);
4393 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4394 stcb->sctp_ep, stcb, asoc->primary_destination);
4397 #ifdef SCTP_SACK_RWND_LOGGING
4398 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4400 stcb->asoc.peers_rwnd,
4401 stcb->asoc.total_flight,
4402 stcb->asoc.total_output_queue_size);
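/*
 * Sketch of the peer-rwnd bookkeeping at the end of the express SACK path
 * above: the advertised a_rwnd is reduced by the bytes still in flight
 * plus a per-chunk overhead allowance (sctp_peer_chunk_oh), clamped at
 * zero, and anything below the sender-side SWS threshold is treated as a
 * zero window so tiny packets are not dribbled out.  All names below are
 * sketch-local.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
peer_rwnd_after_sack(uint32_t a_rwnd, uint32_t total_flight,
    uint32_t sent_queue_cnt, uint32_t per_chunk_oh, uint32_t sws_sender)
{
	uint32_t used = total_flight + sent_queue_cnt * per_chunk_oh;
	uint32_t rwnd = (a_rwnd > used) ? (a_rwnd - used) : 0;

	if (rwnd < sws_sender)
		rwnd = 0;	/* silly window avoidance on the send side */
	return (rwnd);
}

int
main(void)
{
	/* 64 KB advertised, 60 KB in flight over 10 chunks -> 1536 left */
	printf("%u\n", peer_rwnd_after_sack(65536, 61440, 10, 256, 1420));
	return (0);
}
#endif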
4410 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4411 struct sctp_nets *net_from, int *abort_now)
4413 struct sctp_association *asoc;
4414 struct sctp_sack *sack;
4415 struct sctp_tmit_chunk *tp1, *tp2;
4416 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4417 this_sack_lowest_newack;
4418 uint16_t num_seg, num_dup;
4419 uint16_t wake_him = 0;
4420 unsigned int sack_length;
4423 int accum_moved = 0;
4424 int will_exit_fast_recovery = 0;
4426 struct sctp_nets *net = NULL;
4427 int nonce_sum_flag, ecn_seg_sums = 0;
4428 uint8_t reneged_all = 0;
4429 uint8_t cmt_dac_flag;
4432 * we take any chance we can to service our queues since we cannot
4433 * get awoken when the socket is read from :<
4436 * Now perform the actual SACK handling: 1) Verify that it is not an
4437 * old sack, if so discard. 2) If there is nothing left in the send
4438 * queue (cum-ack is equal to last acked) then you have a duplicate
4439 * too, update any rwnd change and verify no timers are running.
4440 * then return. 3) Process any new consecutive data, i.e. cum-ack
4441 * moved; process these first and note that it moved. 4) Process any
4442 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4443 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4444 * sync up flightsizes and things, stop all timers and also check
4445 * for shutdown_pending state. If so then go ahead and send off the
4446 * shutdown. If in shutdown recv, send off the shutdown-ack and
4447 * start that timer, Ret. 9) Strike any non-acked things and do FR
4448 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4449 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4450 * if in shutdown_recv state.
4452 SCTP_TCB_LOCK_ASSERT(stcb);
4455 this_sack_lowest_newack = 0;
4457 sack_length = ntohs(ch->ch.chunk_length);
4458 if (sack_length < sizeof(struct sctp_sack_chunk)) {
4460 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
printf("Bad size on sack chunk .. too small\n");
4467 SCTP_STAT_INCR(sctps_slowpath_sack);
4468 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4469 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4470 num_seg = ntohs(sack->num_gap_ack_blks);
4471 a_rwnd = (uint32_t) ntohl(sack->a_rwnd);
4474 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4475 num_dup = ntohs(sack->num_dup_tsns);
4478 stcb->asoc.overall_error_count = 0;
4480 #ifdef SCTP_SACK_LOGGING
4481 sctp_log_sack(asoc->last_acked_seq,
4488 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
4490 int off_to_dup, iii;
4493 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4494 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4495 dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup);
4496 for (iii = 0; iii < num_dup; iii++) {
4497 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4502 printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4503 off_to_dup, num_dup, sack_length, num_seg);
4508 if (TAILQ_EMPTY(&asoc->send_queue)) {
4509 send_s = asoc->sending_seq;
4511 tp1 = TAILQ_FIRST(&asoc->send_queue);
4512 send_s = tp1->rec.data.TSN_seq;
4515 if (sctp_strict_sacks) {
4516 if (cum_ack == send_s ||
4517 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4521 * no way, we have not even sent this TSN out yet.
4522 * Peer is hopelessly messed up with us.
4527 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4528 0, M_DONTWAIT, 1, MT_DATA);
4530 struct sctp_paramhdr *ph;
4533 oper->m_len = sizeof(struct sctp_paramhdr) +
4535 ph = mtod(oper, struct sctp_paramhdr *);
4536 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4537 ph->param_length = htons(oper->m_len);
4538 ippp = (uint32_t *) (ph + 1);
4539 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4541 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4542 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
4546 /**********************/
4547 /* 1) check the range */
4548 /**********************/
4549 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4550 /* acking something behind */
4553 /* update the Rwnd of the peer */
4554 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4555 TAILQ_EMPTY(&asoc->send_queue) &&
4556 (asoc->stream_queue_cnt == 0)
4558 /* nothing left on send/sent and strmq */
4559 #ifdef SCTP_LOG_RWND
4560 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4561 asoc->peers_rwnd, 0, 0, a_rwnd);
4563 asoc->peers_rwnd = a_rwnd;
4564 if (asoc->sent_queue_retran_cnt) {
4565 asoc->sent_queue_retran_cnt = 0;
4567 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4568 /* SWS sender side engages */
4569 asoc->peers_rwnd = 0;
4571 /* stop any timers */
4572 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4573 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4574 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4575 if (sctp_early_fr) {
4576 if (callout_pending(&net->fr_timer.timer)) {
4577 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4578 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4579 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4582 net->partial_bytes_acked = 0;
4583 net->flight_size = 0;
4585 asoc->total_flight = 0;
4586 asoc->total_flight_count = 0;
 * We init net_ack and net_ack2 to 0.  These are used to track two
 * things: the total byte count acked is tracked in net_ack, and
 * net_ack2 is used to track the total bytes acked that are
 * unambiguous, i.e. were never retransmitted.  We track these on a
 * per destination address basis.
4596 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4597 net->prev_cwnd = net->cwnd;
4602 * CMT: Reset CUC algo variable before SACK processing
4604 net->new_pseudo_cumack = 0;
4605 net->will_exit_fast_recovery = 0;
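/*
 * Added note (commentary only): as the cum-ack walk below moves through
 * the sent queue, each newly acked chunk adds its size to whoTo->net_ack,
 * while chunks that were never retransmitted (snd_count < 2) also count
 * toward whoTo->net_ack2; that second, unambiguous byte count is where
 * the RTT sample is taken, per Karn's rule, and net_ack is what the
 * later cwnd update works from.
 */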
4607 /* process the new consecutive TSN first */
4608 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4610 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4612 last_tsn == tp1->rec.data.TSN_seq) {
4613 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4615 * ECN Nonce: Add the nonce to the sender's
4618 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4620 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
 * If it is less than ACKED, it is
 * now no longer in flight.  Higher
 * values may occur during marking
4626 if ((tp1->whoTo->dest_state &
4627 SCTP_ADDR_UNCONFIRMED) &&
4628 (tp1->snd_count < 2)) {
4630 * If there was no retran
4631 * and the address is
4632 * un-confirmed and we sent
4634 * sacked.. its confirmed,
4637 tp1->whoTo->dest_state &=
4638 ~SCTP_ADDR_UNCONFIRMED;
4640 #ifdef SCTP_FLIGHT_LOGGING
4641 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
4642 tp1->whoTo->flight_size,
4645 tp1->rec.data.TSN_seq);
4647 if (tp1->whoTo->flight_size >= tp1->book_size) {
4648 tp1->whoTo->flight_size -= tp1->book_size;
4650 tp1->whoTo->flight_size = 0;
4652 if (asoc->total_flight >= tp1->book_size) {
4653 asoc->total_flight -= tp1->book_size;
4654 if (asoc->total_flight_count > 0)
4655 asoc->total_flight_count--;
4657 asoc->total_flight = 0;
4658 asoc->total_flight_count = 0;
4660 tp1->whoTo->net_ack += tp1->send_size;
4662 /* CMT SFR and DAC algos */
4663 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4664 tp1->whoTo->saw_newack = 1;
4666 if (tp1->snd_count < 2) {
 * True non-retransmitted
4671 tp1->whoTo->net_ack2 +=
4674 /* update RTO too? */
4677 sctp_calculate_rto(stcb,
4679 &tp1->sent_rcv_time);
4680 tp1->whoTo->rto_pending = 0;
4685 * CMT: CUCv2 algorithm. From the
4686 * cumack'd TSNs, for each TSN being
4687 * acked for the first time, set the
4688 * following variables for the
4689 * corresp destination.
4690 * new_pseudo_cumack will trigger a
4692 * find_(rtx_)pseudo_cumack will
4693 * trigger search for the next
4694 * expected (rtx-)pseudo-cumack.
4696 tp1->whoTo->new_pseudo_cumack = 1;
4697 tp1->whoTo->find_pseudo_cumack = 1;
4698 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4701 #ifdef SCTP_SACK_LOGGING
4702 sctp_log_sack(asoc->last_acked_seq,
4704 tp1->rec.data.TSN_seq,
4707 SCTP_LOG_TSN_ACKED);
4709 #ifdef SCTP_CWND_LOGGING
4710 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4713 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4714 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4715 #ifdef SCTP_AUDITING_ENABLED
4716 sctp_audit_log(0xB3,
4717 (asoc->sent_queue_retran_cnt & 0x000000ff));
4720 tp1->sent = SCTP_DATAGRAM_ACKED;
4725 tp1 = TAILQ_NEXT(tp1, sctp_next);
4727 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4728 /* always set this up to cum-ack */
4729 asoc->this_sack_highest_gap = last_tsn;
4731 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
4733 /* skip corrupt segments */
4739 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4740 * to be greater than the cumack. Also reset saw_newack to 0
4743 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4744 net->saw_newack = 0;
4745 net->this_sack_highest_newack = last_tsn;
 * this_sack_highest_gap will increase while handling NEW segments;
 * this_sack_highest_newack will increase while handling NEWLY ACKED
 * chunks.  this_sack_lowest_newack is used for the CMT DAC algorithm.
 * saw_newack will also change.
4754 sctp_handle_segments(stcb, asoc, ch, last_tsn,
4755 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4756 num_seg, &ecn_seg_sums);
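/*
 * Added illustrative sketch (not in the original code): the length checks
 * around the gap-ack walk follow from the SACK wire format of RFC 2960,
 * which is a fixed sctp_sack_chunk header, then num_seg gap-ack blocks,
 * then num_dup 32-bit duplicate TSNs:
 */
#if 0
	size_t gap_bytes = (size_t)num_seg * sizeof(struct sctp_gap_ack_block);
	size_t dup_off = sizeof(struct sctp_sack_chunk) + gap_bytes;
	size_t needed = dup_off + (size_t)num_dup * sizeof(uint32_t);

	if (needed > sack_length) {
		/* chunk is shorter than what it claims to carry: malformed */
	}
#endif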
4758 if (sctp_strict_sacks) {
4760 * validate the biggest_tsn_acked in the gap acks if
4761 * strict adherence is wanted.
4763 if ((biggest_tsn_acked == send_s) ||
4764 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4766 * peer is either confused or we are under
4767 * attack. We must abort.
4774 /*******************************************/
4775 /* cancel ALL T3-send timer if accum moved */
4776 /*******************************************/
4777 if (sctp_cmt_on_off) {
4778 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4779 if (net->new_pseudo_cumack)
4780 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4782 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4787 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4788 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4789 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4793 /********************************************/
4794 /* drop the acked chunks from the sendqueue */
4795 /********************************************/
4796 asoc->last_acked_seq = cum_ack;
4798 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4802 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4806 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4807 /* no more sent on list */
4810 tp2 = TAILQ_NEXT(tp1, sctp_next);
4811 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
 * Friendlier printf in lieu of panic now that I think it's
4817 if (tp1->pr_sctp_on) {
4818 if (asoc->pr_sctp_cnt != 0)
4819 asoc->pr_sctp_cnt--;
4821 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4822 (asoc->total_flight > 0)) {
4823 printf("Warning flight size incorrect should be 0 is %d\n",
4824 asoc->total_flight);
4825 asoc->total_flight = 0;
4828 sctp_free_bufspace(stcb, asoc, tp1, 1);
4829 sctp_m_freem(tp1->data);
4830 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4831 asoc->sent_queue_cnt_removeable--;
4834 #ifdef SCTP_SACK_LOGGING
4835 sctp_log_sack(asoc->last_acked_seq,
4837 tp1->rec.data.TSN_seq,
4840 SCTP_LOG_FREE_SENT);
4843 asoc->sent_queue_cnt--;
4844 sctp_free_remote_addr(tp1->whoTo);
4846 sctp_free_a_chunk(stcb, tp1);
4849 } while (tp1 != NULL);
4852 if ((wake_him) && (stcb->sctp_socket)) {
4853 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4854 #ifdef SCTP_WAKE_LOGGING
4855 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4857 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4858 #ifdef SCTP_WAKE_LOGGING
4860 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4864 if ((sctp_cmt_on_off == 0) && asoc->fast_retran_loss_recovery && accum_moved) {
4865 if (compare_with_wrap(asoc->last_acked_seq,
4866 asoc->fast_recovery_tsn, MAX_TSN) ||
4867 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4868 /* Setup so we will exit RFC2582 fast recovery */
4869 will_exit_fast_recovery = 1;
 * Check for revoked fragments:
 *
 * - If the previous SACK had no gap-ack blocks, then nothing can have
 *   been revoked.
 * - If the previous SACK did have gap-ack blocks:
 *   - If we now have gap-ack blocks too (num_seg > 0), call
 *     sctp_check_for_revoked() to tell whether the peer revoked some
 *     of them.
 *   - Otherwise the peer revoked all previously ACKED fragments, since
 *     we had some before and now we have NONE.
4882 if (sctp_cmt_on_off) {
4884 * Don't check for revoked if CMT is ON. CMT causes
4885 * reordering of data and acks (received on different
4886 * interfaces) can be persistently reordered. Acking
4887 * followed by apparent revoking and re-acking causes
4888 * unexpected weird behavior. So, at this time, CMT does not
4889 * respect renegs. Renegs will have to be recovered through
4890 * a timeout. Not a big deal for such a rare event.
4893 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
4894 else if (asoc->saw_sack_with_frags) {
4895 int cnt_revoked = 0;
4897 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4899 /* Peer revoked all dg's marked or acked */
4900 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4901 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
4902 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
4903 tp1->sent = SCTP_DATAGRAM_SENT;
4904 tp1->rec.data.chunk_was_revoked = 1;
4905 tp1->whoTo->flight_size += tp1->book_size;
4906 asoc->total_flight_count++;
4907 asoc->total_flight += tp1->book_size;
4915 asoc->saw_sack_with_frags = 0;
4918 asoc->saw_sack_with_frags = 1;
4920 asoc->saw_sack_with_frags = 0;
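/*
 * Added note (commentary only): a "revoked" chunk is one the peer
 * gap-acked in an earlier SACK but no longer reports; the loop above
 * therefore puts such chunks back to SCTP_DATAGRAM_SENT and re-adds
 * their book_size to the flight accounting, as if they were still
 * outstanding.
 */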
4923 sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4925 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4926 /* nothing left in-flight */
4927 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4928 /* stop all timers */
4929 if (sctp_early_fr) {
4930 if (callout_pending(&net->fr_timer.timer)) {
4931 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4932 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4933 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4936 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4937 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4938 net->flight_size = 0;
4939 net->partial_bytes_acked = 0;
4941 asoc->total_flight = 0;
4942 asoc->total_flight_count = 0;
4944 /**********************************/
4945 /* Now what about shutdown issues */
4946 /**********************************/
4947 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4948 /* nothing left on sendqueue.. consider done */
4949 #ifdef SCTP_LOG_RWND
4950 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4951 asoc->peers_rwnd, 0, 0, a_rwnd);
4953 asoc->peers_rwnd = a_rwnd;
4954 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4955 /* SWS sender side engages */
4956 asoc->peers_rwnd = 0;
4959 if ((asoc->stream_queue_cnt == 1) &&
4960 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4961 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4962 (asoc->locked_on_sending)
4964 struct sctp_stream_queue_pending *sp;
4967 * I may be in a state where we got all across.. but
4968 * cannot write more due to a shutdown... we abort
4969 * since the user did not indicate EOR in this case.
4971 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4973 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
4974 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4975 asoc->locked_on_sending = NULL;
4976 asoc->stream_queue_cnt--;
4979 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4980 (asoc->stream_queue_cnt == 0)) {
4981 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4982 /* Need to abort here */
4988 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4989 0, M_DONTWAIT, 1, MT_DATA);
4991 struct sctp_paramhdr *ph;
4994 oper->m_len = sizeof(struct sctp_paramhdr) +
4996 ph = mtod(oper, struct sctp_paramhdr *);
4997 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4998 ph->param_length = htons(oper->m_len);
4999 ippp = (uint32_t *) (ph + 1);
5000 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5002 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5003 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
5006 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
5007 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5008 sctp_stop_timers_for_shutdown(stcb);
5009 sctp_send_shutdown(stcb,
5010 stcb->asoc.primary_destination);
5011 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5012 stcb->sctp_ep, stcb, asoc->primary_destination);
5013 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5014 stcb->sctp_ep, stcb, asoc->primary_destination);
5017 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5018 (asoc->stream_queue_cnt == 0)) {
5019 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5022 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
5023 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5024 sctp_send_shutdown_ack(stcb,
5025 stcb->asoc.primary_destination);
5027 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5028 stcb->sctp_ep, stcb, asoc->primary_destination);
5033 * Now here we are going to recycle net_ack for a different use...
5036 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5041 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5042 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5043 * automatically ensure that.
5045 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) {
5046 this_sack_lowest_newack = cum_ack;
5049 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5050 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5052 /*********************************************/
5053 /* Here we perform PR-SCTP procedures */
5055 /*********************************************/
5056 /* C1. update advancedPeerAckPoint */
5057 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5058 asoc->advanced_peer_ack_point = cum_ack;
5060 /* C2. try to further move advancedPeerAckPoint ahead */
5062 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5063 struct sctp_tmit_chunk *lchk;
5065 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5066 /* C3. See if we need to send a Fwd-TSN */
5067 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5070 * ISSUE with ECN, see FWD-TSN processing for notes
5071 * on issues that will occur when the ECN NONCE
5072 * stuff is put into SCTP for cross checking.
5074 send_forward_tsn(stcb, asoc);
5077 * ECN Nonce: Disable Nonce Sum check when FWD TSN
5078 * is sent and store resync tsn
5080 asoc->nonce_sum_check = 0;
5081 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5083 /* Assure a timer is up */
5084 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5085 stcb->sctp_ep, stcb, lchk->whoTo);
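/*
 * Added illustrative example: suppose the cum-ack is 9 and TSNs 10-12
 * were abandoned under PR-SCTP while 13 is still wanted.  C2 advances
 * advanced_peer_ack_point to 12, and since 12 is beyond the cum-ack, C3
 * sends a FORWARD-TSN carrying 12 so the receiver can slide its own
 * cumulative TSN past the skipped data.
 */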
5090 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
5091 * (net->fast_retran_loss_recovery == 0)))
5093 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5094 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
5095 /* out of a RFC2582 Fast recovery window? */
5096 if (net->net_ack > 0) {
 * Per section 7.2.3, check whether any
 * destinations had a fast retransmit
 * to them.  If so, what we need to do
 * is adjust ssthresh and cwnd.
5103 struct sctp_tmit_chunk *lchk;
5105 #ifdef SCTP_HIGH_SPEED
5106 sctp_hs_cwnd_decrease(stcb, net);
5108 #ifdef SCTP_CWND_MONITOR
5109 int old_cwnd = net->cwnd;
5112 net->ssthresh = net->cwnd / 2;
5113 if (net->ssthresh < (net->mtu * 2)) {
5114 net->ssthresh = 2 * net->mtu;
5116 net->cwnd = net->ssthresh;
5117 #ifdef SCTP_CWND_MONITOR
5118 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
5119 SCTP_CWND_LOG_FROM_FR);
5123 lchk = TAILQ_FIRST(&asoc->send_queue);
5125 net->partial_bytes_acked = 0;
5126 /* Turn on fast recovery window */
5127 asoc->fast_retran_loss_recovery = 1;
5129 /* Mark end of the window */
5130 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
5132 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
5136 * CMT fast recovery -- per destination
5137 * recovery variable.
5139 net->fast_retran_loss_recovery = 1;
5142 /* Mark end of the window */
5143 net->fast_recovery_tsn = asoc->sending_seq - 1;
5145 net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
5151 * Disable Nonce Sum Checking and store the
5154 asoc->nonce_sum_check = 0;
5155 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
5157 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
5158 stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5159 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5160 stcb->sctp_ep, stcb, net);
5162 } else if (net->net_ack > 0) {
5164 * Mark a peg that we WOULD have done a cwnd
5165 * reduction but RFC2582 prevented this action.
5167 SCTP_STAT_INCR(sctps_fastretransinrtt);
5172 /******************************************************************
 * Here we do the ECN Nonce checking.
 * We basically check to see whether the nonce sum flag was incorrect
 * or whether resynchronization needs to be done.  Also, if we catch a
 * misbehaving receiver we give him the kick.
5177 ******************************************************************/
5179 if (asoc->ecn_nonce_allowed) {
5180 if (asoc->nonce_sum_check) {
5181 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5182 if (asoc->nonce_wait_for_ecne == 0) {
5183 struct sctp_tmit_chunk *lchk;
5185 lchk = TAILQ_FIRST(&asoc->send_queue);
5186 asoc->nonce_wait_for_ecne = 1;
5188 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5190 asoc->nonce_wait_tsn = asoc->sending_seq;
5193 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5194 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5196 * Misbehaving peer. We need
5197 * to react to this guy
5199 asoc->ecn_allowed = 0;
5200 asoc->ecn_nonce_allowed = 0;
5205 /* See if Resynchronization Possible */
5206 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5207 asoc->nonce_sum_check = 1;
 * Now we must calculate what the base is.  We do this based on two
 * things: we know the parity total for all the segments gap-acked in
 * this SACK (it is stored in ecn_seg_sums), and we also know the
 * SACK's nonce sum flag (it is in nonce_sum_flag).  So we can build a
 * truth table to back-calculate asoc->nonce_sum_expect_base:
 *
 *   SACK-flag-Value   Seg-Sums   Base
 *         0              0         0
 *         0              1         1
 *         1              0         1
 *         1              1         0
 *
 * i.e. Base = Seg-Sums XOR SACK-flag-Value, masked to the low bit.
5223 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
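/*
 * Added worked example: if the gap-acked segments in this SACK carry a
 * nonce parity of 1 (ecn_seg_sums) and the SACK's nonce-sum flag is also
 * 1, then 1 ^ 1 = 0, so the base the peer must have started from is 0;
 * every other combination follows the same XOR.
 */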
5227 /* Now are we exiting loss recovery ? */
5228 if (will_exit_fast_recovery) {
5229 /* Ok, we must exit fast recovery */
5230 asoc->fast_retran_loss_recovery = 0;
5232 if ((asoc->sat_t3_loss_recovery) &&
5233 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5235 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5236 /* end satellite t3 loss recovery */
5237 asoc->sat_t3_loss_recovery = 0;
5239 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5240 if (net->will_exit_fast_recovery) {
5241 /* Ok, we must exit fast recovery */
5242 net->fast_retran_loss_recovery = 0;
5246 /* Adjust and set the new rwnd value */
5247 #ifdef SCTP_LOG_RWND
5248 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5249 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
5252 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5253 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
5254 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5255 /* SWS sender side engages */
5256 asoc->peers_rwnd = 0;
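/*
 * Added worked example (illustrative numbers): with a_rwnd = 64000,
 * total_flight = 12000, sent_queue_cnt = 10 and sctp_peer_chunk_oh = 256,
 * the usable peer window becomes 64000 - (12000 + 10 * 256) = 49440.
 * sctp_sbspace_sub() is used so the subtraction cannot wrap below zero,
 * and a result smaller than the SWS threshold is forced to 0 above so we
 * do not dribble out tiny chunks (sender-side silly window avoidance).
 */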
 * Now we must set up so we have a timer up for anyone with
5264 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5265 if (net->flight_size) {
5267 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5268 stcb->sctp_ep, stcb, net);
5271 if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) {
5272 /* huh, this should not happen */
5274 panic("Flight size incorrect? fixing??");
5276 printf("Flight size incorrect? fixing??\n");
5277 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5278 net->flight_size = 0;
5280 asoc->total_flight = 0;
5281 asoc->total_flight_count = 0;
5282 asoc->sent_queue_retran_cnt = 0;
5283 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5284 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5285 tp1->whoTo->flight_size += tp1->book_size;
5286 asoc->total_flight += tp1->book_size;
5287 asoc->total_flight_count++;
5288 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5289 asoc->sent_queue_retran_cnt++;
5295 #ifdef SCTP_SACK_RWND_LOGGING
5296 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5298 stcb->asoc.peers_rwnd,
5299 stcb->asoc.total_flight,
5300 stcb->asoc.total_output_queue_size);
5307 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5308 struct sctp_nets *netp, int *abort_flag)
5311 uint32_t cum_ack, a_rwnd;
5313 cum_ack = ntohl(cp->cumulative_tsn_ack);
5314 /* Arrange so a_rwnd does NOT change */
5315 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
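/*
 * Added note: the express SACK path below recomputes the peer's window
 * as (roughly) a_rwnd minus the bytes still in flight, so handing it
 * peers_rwnd + total_flight makes that subtraction come back out to the
 * current peers_rwnd, i.e. the SHUTDOWN's cum-ack is processed without
 * pretending the advertised window changed.
 */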
5317 /* Now call the express sack handling */
5318 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5322 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5323 struct sctp_stream_in *strmin)
5325 struct sctp_queued_to_read *ctl, *nctl;
5326 struct sctp_association *asoc;
5330 tt = strmin->last_sequence_delivered;
5332 * First deliver anything prior to and including the stream no that
5335 ctl = TAILQ_FIRST(&strmin->inqueue);
5337 nctl = TAILQ_NEXT(ctl, next);
5338 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5339 (tt == ctl->sinfo_ssn)) {
5340 /* this is deliverable now */
5341 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5342 /* subtract pending on streams */
5343 asoc->size_on_all_streams -= ctl->length;
5344 sctp_ucount_decr(asoc->cnt_on_all_streams);
5345 /* deliver it to at least the delivery-q */
5346 if (stcb->sctp_socket) {
5347 sctp_add_to_readq(stcb->sctp_ep, stcb,
5349 &stcb->sctp_socket->so_rcv, 1);
5352 /* no more delivery now. */
5358 * now we must deliver things in queue the normal way if any are
5361 tt = strmin->last_sequence_delivered + 1;
5362 ctl = TAILQ_FIRST(&strmin->inqueue);
5364 nctl = TAILQ_NEXT(ctl, next);
5365 if (tt == ctl->sinfo_ssn) {
5366 /* this is deliverable now */
5367 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5368 /* subtract pending on streams */
5369 asoc->size_on_all_streams -= ctl->length;
5370 sctp_ucount_decr(asoc->cnt_on_all_streams);
5371 /* deliver it to at least the delivery-q */
5372 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5373 if (stcb->sctp_socket) {
5374 sctp_add_to_readq(stcb->sctp_ep, stcb,
5376 &stcb->sctp_socket->so_rcv, 1);
5378 tt = strmin->last_sequence_delivered + 1;
5387 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5388 struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
 * ISSUES that MUST be fixed for ECN!  When we are the sender of the
 * forward TSN, and the SACK comes back that acknowledges the FWD-TSN,
 * we must reset the NONCE sum to match correctly.  This will get
 * quite tricky since we may have sent more intervening data and must
 * carefully account for what the SACK says on the nonce and any gaps
 * that are reported.  This work will NOT be done here, but I note it
 * here since it is really related to PR-SCTP and ECN.
5401 /* The pr-sctp fwd tsn */
 * Here we will perform all the data receiver side steps for
 * processing a FwdTSN, as required by the PR-SCTP draft:
 *
 * Assume we get FwdTSN(x):
 *
 * 1) update the local cumTSN to x
 * 2) try to further advance the cumTSN to x plus others we have received
 * 3) examine and update the re-ordering queue on the pr-in-streams
 * 4) clean up the re-assembly queue
 * 5) send a SACK to report where we are
5413 struct sctp_strseq *stseq;
5414 struct sctp_association *asoc;
5415 uint32_t new_cum_tsn, gap, back_out_htsn;
5416 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5417 struct sctp_stream_in *strm;
5418 struct sctp_tmit_chunk *chk, *at;
5420 cumack_set_flag = 0;
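/*
 * Added illustrative sketch (not in the original code): the FORWARD-TSN
 * chunk parsed here is, per the PR-SCTP draft, a chunk header, the new
 * cumulative TSN, and then zero or more (stream, sequence) pairs, which
 * is why the tail of the chunk is walked below as an array of
 * struct sctp_strseq:
 */
#if 0
	struct fwd_tsn_wire_view {
		struct sctp_chunkhdr ch;	/* type, flags, length */
		uint32_t new_cumulative_tsn;	/* TSN to skip forward to */
		/* followed by struct sctp_strseq entries: { stream, sequence } */
	};
#endif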
5423 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5425 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
5426 printf("Bad size too small/big fwd-tsn\n");
5431 m_size = (stcb->asoc.mapping_array_size << 3);
5432 /*************************************************************/
5433 /* 1. Here we update local cumTSN and shift the bitmap array */
5434 /*************************************************************/
5435 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5437 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5438 asoc->cumulative_tsn == new_cum_tsn) {
5439 /* Already got there ... */
5442 back_out_htsn = asoc->highest_tsn_inside_map;
5443 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5445 asoc->highest_tsn_inside_map = new_cum_tsn;
5446 #ifdef SCTP_MAP_LOGGING
5447 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5451 * now we know the new TSN is more advanced, let's find the actual
5454 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
5456 (new_cum_tsn == asoc->mapping_array_base_tsn)) {
5457 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
5459 /* try to prevent underflow here */
5460 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
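/*
 * Added worked example: with mapping_array_base_tsn = 0xfffffff0 and
 * new_cum_tsn = 5, the wrap branch gives gap = 5 + (0xffffffff -
 * 0xfffffff0) + 1 = 21, which is exactly how far new_cum_tsn sits ahead
 * of the base.  (Since gap is a uint32_t, the "gap < 0" test below can
 * never fire; the gap > m_size comparison is what really catches an
 * out-of-range FWD-TSN.)
 */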
5463 if (gap > m_size || gap < 0) {
5464 asoc->highest_tsn_inside_map = back_out_htsn;
5465 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
 * out of range: even counted as single-byte chunks it exceeds the
 * rwnd I give out; too questionable, better to drop it
5473 if (asoc->highest_tsn_inside_map >
5474 asoc->mapping_array_base_tsn) {
5475 gap = asoc->highest_tsn_inside_map -
5476 asoc->mapping_array_base_tsn;
5478 gap = asoc->highest_tsn_inside_map +
5479 (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5481 cumack_set_flag = 1;
5483 for (i = 0; i <= gap; i++) {
5484 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
5487 * Now after marking all, slide thing forward but no sack please.
5489 sctp_sack_check(stcb, 0, 0, abort_flag);
5493 if (cumack_set_flag) {
5495 * fwd-tsn went outside my gap array - not a common
 * occurrence.  Do the same thing we do when a cookie-echo
5499 asoc->highest_tsn_inside_map = new_cum_tsn - 1;
5500 asoc->mapping_array_base_tsn = new_cum_tsn;
5501 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
5502 #ifdef SCTP_MAP_LOGGING
5503 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5505 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5507 /*************************************************************/
5508 /* 2. Clear up re-assembly queue */
5509 /*************************************************************/
5512 * First service it if pd-api is up, just in case we can progress it
5515 if (asoc->fragmented_delivery_inprogress) {
5516 sctp_service_reassembly(stcb, asoc);
5518 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5519 /* For each one on here see if we need to toss it */
 * For now, large messages held on the reasmqueue that are complete
 * will be tossed too.  We could in theory do more work, spinning
 * through and stopping after dumping one msg (i.e. on seeing the
 * start of a new msg at the head) and calling the delivery function
 * to see whether it can be delivered...  But for now we just dump
 * everything on the queue.
5528 chk = TAILQ_FIRST(&asoc->reasmqueue);
5530 at = TAILQ_NEXT(chk, sctp_next);
5531 if (compare_with_wrap(asoc->cumulative_tsn,
5532 chk->rec.data.TSN_seq, MAX_TSN) ||
5533 asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
5534 /* It needs to be tossed */
5535 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5536 if (compare_with_wrap(chk->rec.data.TSN_seq,
5537 asoc->tsn_last_delivered, MAX_TSN)) {
5538 asoc->tsn_last_delivered =
5539 chk->rec.data.TSN_seq;
5540 asoc->str_of_pdapi =
5541 chk->rec.data.stream_number;
5542 asoc->ssn_of_pdapi =
5543 chk->rec.data.stream_seq;
5544 asoc->fragment_flags =
5545 chk->rec.data.rcv_flags;
5547 asoc->size_on_reasm_queue -= chk->send_size;
5548 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5551 /* Clear up any stream problem */
5552 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5553 SCTP_DATA_UNORDERED &&
5554 (compare_with_wrap(chk->rec.data.stream_seq,
5555 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
 * We must move this stream's sequence
 * number forward if the chunk being
 * skipped is not unordered.  There is
 * a chance that if the peer does not
 * include the last fragment in its
 * FWD-TSN we WILL have a problem here,
 * since you would have a partial chunk
 * in the queue that may not be
 * deliverable.  Also, if a partial
 * delivery API has started, the user
 * may get a partial chunk, with the
 * next read returning a new chunk...
 * really ugly, but I see no way
 * around it!  Maybe a notify??
5574 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5575 chk->rec.data.stream_seq;
5578 sctp_m_freem(chk->data);
5581 sctp_free_remote_addr(chk->whoTo);
5582 sctp_free_a_chunk(stcb, chk);
5585 * Ok we have gone beyond the end of the
5586 * fwd-tsn's mark. Some checks...
5588 if ((asoc->fragmented_delivery_inprogress) &&
5589 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
 * Special case: PD-API is up, and
 * what we fwd-tsn'd over includes
 * one that had the LAST_FRAG.  We
 * no longer need to do the PD-API.
5596 asoc->fragmented_delivery_inprogress = 0;
5597 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5598 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
5606 if (asoc->fragmented_delivery_inprogress) {
5608 * Ok we removed cnt_gone chunks in the PD-API queue that
5609 * were being delivered. So now we must turn off the flag.
5611 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5612 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
5613 asoc->fragmented_delivery_inprogress = 0;
5615 /*************************************************************/
5616 /* 3. Update the PR-stream re-ordering queues */
5617 /*************************************************************/
5618 stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
5619 fwd_sz -= sizeof(*fwd);
5624 num_str = fwd_sz / sizeof(struct sctp_strseq);
5625 for (i = 0; i < num_str; i++) {
5630 xx = (unsigned char *)&stseq[i];
5631 st = ntohs(stseq[i].stream);
5632 stseq[i].stream = st;
5633 st = ntohs(stseq[i].sequence);
5634 stseq[i].sequence = st;
5636 if (stseq[i].stream > asoc->streamincnt) {
 * It is arguable whether we should
 * continue.  Since the peer sent bogus
 * stream info, we may be in deep
 * trouble; a return may be
5645 strm = &asoc->strmin[stseq[i].stream];
5646 if (compare_with_wrap(stseq[i].sequence,
5647 strm->last_sequence_delivered, MAX_SEQ)) {
5648 /* Update the sequence number */
5649 strm->last_sequence_delivered =
5652 /* now kick the stream the new way */
5653 sctp_kick_prsctp_reorder_queue(stcb, strm);