/*-
 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ipsec.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/route.h>

#include <sys/limits.h>
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>

#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif				/* IPSEC */
#ifdef SCTP_DEBUG
extern uint32_t sctp_debug_on;
#endif
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it ... for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
extern int sctp_strict_sacks;
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc, calc_w_oh;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return;

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised rwnd, we
		 * clamp the rwnd to 1. This lets us still accept inbound
		 * segments, but hopefully will shut the sender down when he
		 * finally gets the message.
		 */
		asoc->my_rwnd = 1;
	}
	if (asoc->my_rwnd &&
	    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
		/* SWS engaged, tell peer none left */
		asoc->my_rwnd = 1;
	}
}
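
/*
 * Editor's sketch (not from the original source): the computation above is
 * roughly
 *
 *   calc  = sctp_sbspace(so_rcv)        free socket-buffer space
 *   calc -= size_on_reasm_queue         held here, not yet readable
 *   calc -= size_on_all_streams         held here, not yet readable
 *   my_rwnd = calc, clamped to 1 when the control-mbuf overhead
 *             (my_rwnd_control_len) consumes all of it, and clamped to 1
 *             again when it falls below the SWS receiver threshold.
 */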
/*
 * Calculate what the rwnd would be
 */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0, calc_w_oh;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised rwnd, we
		 * clamp the rwnd to 1. This lets us still accept inbound
		 * segments, but hopefully will shut the sender down when he
		 * finally gets the message.
		 */
		calc = 1;
	}
	if (calc &&
	    (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
		/* SWS engaged, tell peer none left */
		calc = 1;
	}
	return (calc);
}
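
/*
 * Editor's note: sctp_calc_rwnd() intentionally mirrors sctp_set_rwnd()
 * above; the only difference is that the result is returned to the caller
 * instead of being stored in asoc->my_rwnd.
 */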
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
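
/*
 * Editor's usage sketch (hypothetical caller, not from the original file):
 *
 *   control = sctp_build_readq_entry(stcb, net, tsn, ppid, context,
 *       strmno, strmseq, flags, dm);
 *   if (control != NULL)
 *       sctp_add_to_readq(stcb->sctp_ep, stcb, control,
 *           &stcb->sctp_socket->so_rcv, end);
 *
 * i.e. build the entry, then hand it to the socket receive buffer.
 */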
/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
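
/*
 * Editor's note: identical in effect to sctp_build_readq_entry(), except
 * that every field is sourced from a chunk already parked on the
 * reassembly queue rather than from the raw packet.
 */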
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
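
/*
 * Editor's note: the returned mbuf carries one control message laid out as
 * [struct cmsghdr][sctp_sndrcvinfo or sctp_extrcvinfo]. CMSG_DATA() is used
 * to locate the payload because CMSG_LEN() may include alignment padding
 * between the header and the struct.
 */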
/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *ctlat;

	cntDel = stream_no = 0;
	if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since its in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.control_pdapi == NULL) {
					panic("This should not happen control_pdapi NULL?");
				}
				if (stcb->asoc.control_pdapi->tail_mbuf == NULL) {
					panic("This should not happen, tail_mbuf not being maintained?");
				}
				/* if we did not panic, it was an EOM */
				panic("Bad chunking ??");
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_remote_addr(chk->whoTo);
		sctp_free_a_chunk(stcb, chk);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
		    (uint32_t) control->sinfo_stream,
		    (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
	}
#endif
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
			    control->sinfo_ssn,
			    strm->last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);
		*abort_flag = 1;
		return;
	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
#ifdef SCTP_STR_LOGGING
				sctp_log_strm_del(control, NULL,
				    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
#ifdef SCTP_STR_LOGGING
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
#ifdef SCTP_STR_LOGGING
					sctp_log_strm_del(control, at,
					    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, He sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					sctp_free_remote_addr(control->whoFrom);
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
#ifdef SCTP_STR_LOGGING
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
#endif
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
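
/*
 * Editor's example: with TSNs 100 (FIRST), 101 and 102 (LAST) queued, the
 * walk above sums the three send_sizes into *t_size and returns 1; if 102
 * is still missing the walk falls off the queue (or hits a TSN gap) and
 * returns 0 instead.
 */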
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its ok to deliver
			 * but should we?
			 */
			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (tsize > stcb->sctp_ep->partial_delivery_point))) {

				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it wont know to go
		 * back and call itself again... we do that here with the
		 * got doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our Fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
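
/*
 * Editor's note: delivery only starts when either the whole message is
 * already queued (sctp_is_all_msg_on_reasm()) or the queued prefix exceeds
 * the endpoint's partial_delivery_point, i.e. the point at which the
 * partial delivery API (PD-API) is engaged.
 */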
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				}
#endif
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
				return;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				}
#endif
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
				return;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
						    chk->rec.data.stream_number,
						    asoc->str_of_pdapi);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
						    chk->rec.data.stream_seq,
						    asoc->ssn_of_pdapi);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - It can be a middle or last but not a first\n");
						printf("Gak, Evil plot, it's a FIRST!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    prev->rec.data.stream_number);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    prev->rec.data.stream_seq);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is FIRST, we must be LAST\n");
						printf("Gak, Evil plot, its not a last!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is a MIDDLE/LAST\n");
						printf("Gak, Evil plot, new prev chunk is a LAST\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    next->rec.data.stream_number);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    next->rec.data.stream_seq);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
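
/*
 * Editor's summary of the adjacency audits above: for consecutive TSNs a
 * FIRST or MIDDLE fragment may only be followed by a MIDDLE or LAST, a
 * LAST may only be followed by a FIRST, and ordered fragments of the same
 * message must agree on both stream number and stream sequence number.
 */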
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it.
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
extern unsigned int sctp_max_chunks_on_queue;
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk; */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
#endif
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		if (sctp_expand_mapping_array(asoc)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
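	/*
	 * Editor's example: with mapping_array_base_tsn = 0xfffffff0 and
	 * tsn = 5 the else branch above gives gap = (MAX_TSN - 0xfffffff0)
	 * + 5 + 1 = 21, the bit offset of this TSN inside the (wrapping)
	 * mapping window.
	 */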
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		if (!SCTP_OS_TIMER_PENDING(&asoc->dack_timer.timer)) {
			/*
			 * By starting the timer we assure that we WILL sack
			 * at the end of the packet when sctp_sack_check
			 * gets called.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
			    stcb, NULL);
		}
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn,
		    asoc->highest_tsn_inside_map, MAX_TSN)) {

			/* Nope not in the valid range dump it */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
				printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
				    (u_long)tsn, (u_long)asoc->my_rwnd,
				    sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
			}
#endif
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		return (0);
	}
	/*
	 * Before we continue lets validate that we are not being fooled by
	 * an evil attacker. We can only have 4k chunks based on our TSN
	 * spread allowed by the mapping array 512 * 8 bits, so there is no
	 * way our stream sequence numbers could have wrapped. We of course
	 * only validate the FIRST fragment so the bit must be set.
	 */
	strmseq = ntohs(ch->dp.stream_sequence);
	if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
	    strmseq, MAX_SEQ) ||
	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
		/* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
			    strmseq,
			    asoc->strmin[strmno].last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (3 * sizeof(uint32_t));
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
			ippp++;
			*ippp = tsn;
			ippp++;
			*ippp = ((strmno << 16) | strmseq);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);
		*abort_flag = 1;
		return (0);
	}
	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = SCTP_M_COPYM(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
		{
			struct mbuf *mat;

			mat = dmbuf;
			while (mat) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = SCTP_BUF_NEXT(mat);
			}
		}
#endif
	} else {
		/* We can steal the last chunk */
		int l_len;

		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
			l_len = SCTP_BUF_LEN(dmbuf);
		} else {
			/*
			 * need to count up the size hopefully does not hit
			 * this too often :-0
			 */
			struct mbuf *lat;

			l_len = 0;
			lat = dmbuf;
			while (lat) {
				l_len += SCTP_BUF_LEN(lat);
				lat = SCTP_BUF_NEXT(lat);
			}
		}
		if (l_len > the_len) {
			/* Trim the end round bytes off too */
			m_adj(dmbuf, -(l_len - the_len));
		}
	}
	if (dmbuf == NULL) {
		SCTP_STAT_INCR(sctps_nomem);
		return (0);
	}
	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    asoc->fragmented_delivery_inprogress == 0 &&
	    TAILQ_EMPTY(&asoc->resetHead) &&
	    ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
		/* Candidate for express delivery */
		/*
		 * Its not fragmented, No PD-API is up, Nothing in the
		 * delivery queue, Its un-ordered OR ordered and the next to
		 * deliver AND nothing else is stuck on the stream queue,
		 * And there is room for it in the socket buffer. Lets just
		 * stuff it up the buffer....
		 */

		/* It would be nice to avoid this copy if we could :< */
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    ch->dp.protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    ch->ch.chunk_flags,
		    dmbuf);
		if (control == NULL) {
			goto failed_express_del;
		}
		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1);
		if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
			/* for ordered, bump what we delivered */
			asoc->strmin[strmno].last_sequence_delivered++;
		}
		SCTP_STAT_INCR(sctps_recvexpress);
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
		    SCTP_STR_LOG_FROM_EXPRS_DEL);
#endif
		control = NULL;
		goto finish_express_del;
	}
	/* If we reach here this is a new chunk */
	chk = NULL;
	control = NULL;
	/* Express for fragmented delivery? */
	if ((asoc->fragmented_delivery_inprogress) &&
	    (stcb->asoc.control_pdapi) &&
	    (asoc->str_of_pdapi == strmno) &&
	    (asoc->ssn_of_pdapi == strmseq)
	    ) {
		control = stcb->asoc.control_pdapi;
		if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
			/* Can't be another first? */
			goto failed_pdapi_express_del;
		}
		if (tsn == (control->sinfo_tsn + 1)) {
			/* Yep, we can add it on */
			int end = 0;
			uint32_t cumack;

			if (ch->ch.chunk_flags & SCTP_DATA_LAST_FRAG) {
				end = 1;
			}
			cumack = asoc->cumulative_tsn;
			if ((cumack + 1) == tsn)
				cumack = tsn;

			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
			    cumack,
			    &stcb->sctp_socket->so_rcv)) {
				printf("Append fails end:%d\n", end);
				goto failed_pdapi_express_del;
			}
			SCTP_STAT_INCR(sctps_recvexpressm);
			control->sinfo_tsn = tsn;
			asoc->tsn_last_delivered = tsn;
			asoc->fragment_flags = ch->ch.chunk_flags;
			asoc->tsn_of_pdapi_last_delivered = tsn;
			asoc->last_flags_delivered = ch->ch.chunk_flags;
			asoc->last_strm_seq_delivered = strmseq;
			asoc->last_strm_no_delivered = strmno;
			if (end) {
				/* clean up the flags and such */
				asoc->fragmented_delivery_inprogress = 0;
				if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
					asoc->strmin[strmno].last_sequence_delivered++;
				}
				stcb->asoc.control_pdapi = NULL;
				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
					/*
					 * There could be another message
					 * waiting, check.
					 */
					need_reasm_check = 1;
				}
				control = NULL;
			}
			goto finish_express_del;
		}
	}
failed_pdapi_express_del:
	control = NULL;
	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		sctp_alloc_a_chunk(stcb, chk);
		if (chk == NULL) {
			/* No memory so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		chk->rec.data.TSN_seq = tsn;
		chk->no_fr_allowed = 0;
		chk->rec.data.stream_seq = strmseq;
		chk->rec.data.stream_number = strmno;
		chk->rec.data.payloadtype = ch->dp.protocol_id;
		chk->rec.data.context = stcb->asoc.context;
		chk->rec.data.doing_fast_retransmit = 0;
		chk->rec.data.rcv_flags = ch->ch.chunk_flags;
		chk->asoc = asoc;
		chk->send_size = the_len;
		chk->whoTo = net;
		atomic_add_int(&net->ref_count, 1);
		chk->data = dmbuf;
	} else {
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    ch->dp.protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    ch->ch.chunk_flags,
		    dmbuf);
		if (control == NULL) {
			/* No memory so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		control->length = the_len;
	}
	/* Mark it as received */
	/* Now queue it where it belongs */
	if (control != NULL) {
		/* First a sanity check */
		if (asoc->fragmented_delivery_inprogress) {
			/*
			 * Ok, we have a fragmented delivery in progress if
			 * this chunk is next to deliver OR belongs in our
			 * view to the reassembly, the peer is evil or
			 * broken.
			 */
			uint32_t estimate_tsn;

			estimate_tsn = asoc->tsn_last_delivered + 1;
			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
			    (estimate_tsn == control->sinfo_tsn)) {
				/* Evil/Broke peer */
				sctp_m_freem(control->data);
				control->data = NULL;
				sctp_free_remote_addr(control->whoFrom);
				sctp_free_a_readq(stcb, control);
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
					ippp++;
					*ippp = tsn;
					ippp++;
					*ippp = ((strmno << 16) | strmseq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
				return (0);
			}
			if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
				sctp_m_freem(control->data);
				control->data = NULL;
				sctp_free_remote_addr(control->whoFrom);
				sctp_free_a_readq(stcb, control);

				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper,
					    struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length =
					    htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
					ippp++;
					*ippp = tsn;
					ippp++;
					*ippp = ((strmno << 16) | strmseq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
				sctp_abort_an_association(stcb->sctp_ep,
				    stcb, SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
				return (0);
			}
		} else {
			/* No PDAPI running */
			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
				/*
				 * Reassembly queue is NOT empty validate
				 * that this tsn does not need to be in
				 * reassembly queue. If it does then our peer
				 * is broken or evil.
				 */
				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
					sctp_m_freem(control->data);
					control->data = NULL;
					sctp_free_remote_addr(control->whoFrom);
					sctp_free_a_readq(stcb, control);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
						ippp++;
						*ippp = tsn;
						ippp++;
						*ippp = ((strmno << 16) | strmseq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return (0);
				}
			}
		}
		/* ok, if we reach here we have passed the sanity checks */
		if (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) {
			/* queue directly into socket buffer */
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1);
		} else {
			/*
			 * Special check for when streams are resetting. We
			 * could be more smart about this and check the
			 * actual stream to see if it is not being reset..
			 * that way we would not create a HOLB when amongst
			 * streams being reset and those not being reset.
			 *
			 * We take complete messages that have a stream reset
			 * intervening (aka the TSN is after where our
			 * cum-ack needs to be) off and put them on a
			 * pending_reply_queue. The reassembly ones we do
			 * not have to worry about since they are all sorted
			 * and processed by TSN order. It is only the
			 * singletons I must worry about.
			 */
			struct sctp_stream_reset_list *liste;

			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
			    ((compare_with_wrap(tsn, ntohl(liste->tsn), MAX_TSN)) ||
			    (tsn == ntohl(liste->tsn)))
			    ) {
				/*
				 * yep its past where we need to reset... go
				 * ahead and queue it.
				 */
				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
					/* first one on */
					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
				} else {
					struct sctp_queued_to_read *ctlOn;
					unsigned char inserted = 0;

					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
					while (ctlOn) {
						if (compare_with_wrap(control->sinfo_tsn,
						    ctlOn->sinfo_tsn, MAX_TSN)) {
							ctlOn = TAILQ_NEXT(ctlOn, next);
						} else {
							/* found it */
							TAILQ_INSERT_BEFORE(ctlOn, control, next);
							inserted = 1;
							break;
						}
					}
					if (inserted == 0) {
						/*
						 * must be put at end, use
						 * prevP (all setup from
						 * loop) to setup nextP.
						 */
						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
					}
				}
			} else {
				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
				if (*abort_flag) {
					return (0);
				}
			}
		}
	} else {
		/* Into the re-assembly queue */
		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
		if (*abort_flag) {
			/*
			 * the assoc is now gone and chk was put onto the
			 * reasm queue, which has all been freed.
			 */
			*m = NULL;
			return (0);
		}
	}
finish_express_del:
	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
		/* we have a new high score */
		asoc->highest_tsn_inside_map = tsn;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
	}
	if (tsn == (asoc->cumulative_tsn + 1)) {
		/* Update cum-ack */
		asoc->cumulative_tsn = tsn;
	}
	if (last_chunk) {
		*m = NULL;
	}
	if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
	} else {
		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
	}
	SCTP_STAT_INCR(sctps_recvdata);
	/* Set it present please */
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
#endif
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
#endif
	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
	if (need_reasm_check) {
		/* Another one waits ? */
		sctp_deliver_reasm_check(stcb, asoc);
	}
	return (1);
}
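
/*
 * Editor's note: sctp_map_lookup_tab[b] is the index of the highest bit in
 * the run of consecutive 1-bits starting at bit 0 of byte b, or -1 when
 * bit 0 is clear: tab[0x01] = 0, tab[0x07] = 2, tab[0xff] = 7.
 * sctp_sack_check() below uses it to advance the cumulative TSN across the
 * mapping array one byte at a time.
 */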
2139 int8_t sctp_map_lookup_tab[256] = {
2140 -1, 0, -1, 1, -1, 0, -1, 2,
2141 -1, 0, -1, 1, -1, 0, -1, 3,
2142 -1, 0, -1, 1, -1, 0, -1, 2,
2143 -1, 0, -1, 1, -1, 0, -1, 4,
2144 -1, 0, -1, 1, -1, 0, -1, 2,
2145 -1, 0, -1, 1, -1, 0, -1, 3,
2146 -1, 0, -1, 1, -1, 0, -1, 2,
2147 -1, 0, -1, 1, -1, 0, -1, 5,
2148 -1, 0, -1, 1, -1, 0, -1, 2,
2149 -1, 0, -1, 1, -1, 0, -1, 3,
2150 -1, 0, -1, 1, -1, 0, -1, 2,
2151 -1, 0, -1, 1, -1, 0, -1, 4,
2152 -1, 0, -1, 1, -1, 0, -1, 2,
2153 -1, 0, -1, 1, -1, 0, -1, 3,
2154 -1, 0, -1, 1, -1, 0, -1, 2,
2155 -1, 0, -1, 1, -1, 0, -1, 6,
2156 -1, 0, -1, 1, -1, 0, -1, 2,
2157 -1, 0, -1, 1, -1, 0, -1, 3,
2158 -1, 0, -1, 1, -1, 0, -1, 2,
2159 -1, 0, -1, 1, -1, 0, -1, 4,
2160 -1, 0, -1, 1, -1, 0, -1, 2,
2161 -1, 0, -1, 1, -1, 0, -1, 3,
2162 -1, 0, -1, 1, -1, 0, -1, 2,
2163 -1, 0, -1, 1, -1, 0, -1, 5,
2164 -1, 0, -1, 1, -1, 0, -1, 2,
2165 -1, 0, -1, 1, -1, 0, -1, 3,
2166 -1, 0, -1, 1, -1, 0, -1, 2,
2167 -1, 0, -1, 1, -1, 0, -1, 4,
2168 -1, 0, -1, 1, -1, 0, -1, 2,
2169 -1, 0, -1, 1, -1, 0, -1, 3,
2170 -1, 0, -1, 1, -1, 0, -1, 2,
2171 -1, 0, -1, 1, -1, 0, -1, 7,
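/*
 * The table above maps a mapping-array byte to the highest bit
 * index of its run of consecutive 1-bits starting at bit 0, or to
 * -1 when bit 0 itself is clear: 0x01 -> 0, 0x03 -> 1, 0x07 -> 2,
 * ... 0xff -> 7.  sctp_sack_check() uses it to find the first
 * missing TSN without testing the bits one at a time.
 */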
2176 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2179 * Now we also need to check the mapping array in a couple of ways.
2180 * 1) Did we move the cum-ack point?
2182 struct sctp_association *asoc;
2185 int slide_from, slide_end, lgap, distance;
2187 #ifdef SCTP_MAP_LOGGING
2188 uint32_t old_cumack, old_base, old_highest;
2189 unsigned char aux_array[64];
2192 struct sctp_stream_reset_list *liste;
2197 #ifdef SCTP_MAP_LOGGING
2198 old_cumack = asoc->cumulative_tsn;
2199 old_base = asoc->mapping_array_base_tsn;
2200 old_highest = asoc->highest_tsn_inside_map;
2201 if (asoc->mapping_array_size < 64)
2202 memcpy(aux_array, asoc->mapping_array,
2203 asoc->mapping_array_size);
2205 memcpy(aux_array, asoc->mapping_array, 64);
2209 * We could probably improve this a small bit by calculating the
2210 * offset of the current cum-ack as the starting point.
2214 for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
2215 if (asoc->mapping_array[i] == 0xff) {
2218 /* there is a 0 bit */
2220 at += sctp_map_lookup_tab[asoc->mapping_array[i]];
2224 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + at;
2225 /* at is one off, since in the table an embedded -1 is present */
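/*
 * Worked example (assuming each all-ones byte advances "at" by a
 * full 8 bits): with mapping_array_base_tsn = 1000 and a mapping
 * array of { 0xff, 0x03, ... }, the scan gives at = 8 + 1 = 9, so
 * the cum-ack becomes TSN 1009 and TSN 1010 is the first hole.
 */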
2228 if (compare_with_wrap(asoc->cumulative_tsn,
2229 asoc->highest_tsn_inside_map,
2232 panic("huh, cumack greater than high-tsn in map");
2234 printf("huh, cumack greater than high-tsn in map - should panic?\n");
2235 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2239 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2240 /* The complete array was completed by a single FR */
2241 /* highest becomes the cum-ack */
2244 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2245 /* clear the array */
2247 clr = asoc->mapping_array_size;
2249 clr = (at >> 3) + 1;
2251 * this should be the all-ones case, but just in case
2254 if (clr > asoc->mapping_array_size)
2255 clr = asoc->mapping_array_size;
2257 memset(asoc->mapping_array, 0, clr);
2258 /* base becomes one ahead of the cum-ack */
2259 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2260 #ifdef SCTP_MAP_LOGGING
2261 sctp_log_map(old_base, old_cumack, old_highest,
2262 SCTP_MAP_PREPARE_SLIDE);
2263 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2264 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2266 } else if (at >= 8) {
2267 /* we can slide the mapping array down */
2268 /* Calculate the new byte position we can move down */
2269 slide_from = at >> 3;
2271 * now calculate the ceiling of the move using our highest TSN value
2274 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2275 lgap = asoc->highest_tsn_inside_map -
2276 asoc->mapping_array_base_tsn;
2278 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2279 asoc->highest_tsn_inside_map + 1;
2281 slide_end = lgap >> 3;
2282 if (slide_end < slide_from) {
2283 panic("impossible slide");
2285 distance = (slide_end - slide_from) + 1;
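/*
 * For example, at = 20 gives slide_from = 2; if the highest TSN
 * sits 37 past the array base, lgap = 37 and slide_end = 4, so
 * distance = 3 bytes are copied down to the front of the array
 * below and the base TSN advances by slide_from * 8 = 16.
 */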
2286 #ifdef SCTP_MAP_LOGGING
2287 sctp_log_map(old_base, old_cumack, old_highest,
2288 SCTP_MAP_PREPARE_SLIDE);
2289 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2290 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2292 if (distance + slide_from > asoc->mapping_array_size ||
2295 * Here we do NOT slide forward the array so that
2296 * hopefully when more data comes in to fill it up
2297 * we will be able to slide it forward. Really I
2298 * don't think this should happen :-0
2301 #ifdef SCTP_MAP_LOGGING
2302 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2303 (uint32_t) asoc->mapping_array_size,
2304 SCTP_MAP_SLIDE_NONE);
2309 for (ii = 0; ii < distance; ii++) {
2310 asoc->mapping_array[ii] =
2311 asoc->mapping_array[slide_from + ii];
2313 for (ii = distance; ii <= slide_end; ii++) {
2314 asoc->mapping_array[ii] = 0;
2316 asoc->mapping_array_base_tsn += (slide_from << 3);
2317 #ifdef SCTP_MAP_LOGGING
2318 sctp_log_map(asoc->mapping_array_base_tsn,
2319 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2320 SCTP_MAP_SLIDE_RESULT);
2324 /* check the special flag for stream resets */
2325 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2326 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2327 (asoc->cumulative_tsn == liste->tsn))
2330 * we have finished working through the backlogged TSNs; now it is
2331 * time to reset streams. 1: call the reset function, 2: free the
2332 * pending_reply space, 3: distribute any chunks in the
2333 * pending_reply_queue.
2335 struct sctp_queued_to_read *ctl;
2337 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2338 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2340 liste = TAILQ_FIRST(&asoc->resetHead);
2341 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2342 if (ctl && (liste == NULL)) {
2343 /* All can be removed */
2345 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2346 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2350 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2353 /* more than one in queue */
2354 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2356 * if ctl->sinfo_tsn is <= liste->tsn we can
2357 * process it which is the NOT of
2358 * ctl->sinfo_tsn > liste->tsn
2360 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2361 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2365 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2369 * Now service re-assembly to pick up anything that has been
2370 * held on the reassembly queue.
2372 sctp_deliver_reasm_check(stcb, asoc);
2375 * Now we need to see if we need to queue a sack or just start the
2376 * timer (if allowed).
2379 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2381 * Ok, special case in the SHUTDOWN-SENT state: here we
2382 * make sure the SACK timer is off and instead send a
2383 * SHUTDOWN and a SACK
2385 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2386 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2387 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2389 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2390 sctp_send_sack(stcb);
2394 /* is there a gap now ? */
2395 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2396 stcb->asoc.cumulative_tsn, MAX_TSN);
2399 * CMT DAC algorithm: increase number of packets
2400 * received since last ack
2402 stcb->asoc.cmt_dac_pkts_rcvd++;
2404 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a sack */
2406 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2408 (stcb->asoc.numduptsns) || /* we have dup's */
2409 (is_a_gap) || /* is still a gap */
2410 (stcb->asoc.delayed_ack == 0) ||
2411 (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) /* timer was up; second packet */
2415 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) &&
2416 (stcb->asoc.first_ack_sent == 1) &&
2417 (stcb->asoc.numduptsns == 0) &&
2418 (stcb->asoc.delayed_ack) &&
2419 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2422 * CMT DAC algorithm: With CMT,
2423 * delay acks even in the face of
2425 * reordering. Therefore, acks that
2426 * do not have to be sent because of
2427 * the above reasons will be
2428 * delayed. That is, acks that would
2429 * have been sent due to gap reports
2430 * will be delayed with DAC. Start
2431 * the delayed ack timer.
2433 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2434 stcb->sctp_ep, stcb, NULL);
2437 * Ok we must build a SACK since the
2438 * timer is pending, we got our
2439 * first packet, OR there are gaps or duplicates.
2442 stcb->asoc.first_ack_sent = 1;
2444 sctp_send_sack(stcb);
2445 /* The sending will stop the timer */
2448 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2449 stcb->sctp_ep, stcb, NULL);
2456 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2458 struct sctp_tmit_chunk *chk;
2462 if (asoc->fragmented_delivery_inprogress) {
2463 sctp_service_reassembly(stcb, asoc);
2465 /* Can we proceed further, i.e. the PD-API is complete */
2466 if (asoc->fragmented_delivery_inprogress) {
2471 * Now is there some other chunk I can deliver from the reassembly queue?
2475 chk = TAILQ_FIRST(&asoc->reasmqueue);
2477 asoc->size_on_reasm_queue = 0;
2478 asoc->cnt_on_reasm_queue = 0;
2481 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2482 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2483 ((nxt_todel == chk->rec.data.stream_seq) ||
2484 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2486 * Yep, the first one is here. We set up to start reception
2487 * by backing down the TSN just in case we can't deliver.
2491 * Before we start, though, either all of the message should
2492 * be here, or at least 1/4 of the socket buffer max, or there should be
2493 * nothing on the delivery queue and something deliverable.
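/*
 * For example (hypothetical numbers): with a partial_delivery_point
 * of 4096, a partly reassembled 10000-byte message starts through
 * the PD-API once more than 4096 bytes of it sit on the reasm
 * queue, before its last fragment has arrived.
 */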
2495 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2496 (tsize > stcb->sctp_ep->partial_delivery_point))) {
2497 asoc->fragmented_delivery_inprogress = 1;
2498 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2499 asoc->str_of_pdapi = chk->rec.data.stream_number;
2500 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2501 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2502 asoc->fragment_flags = chk->rec.data.rcv_flags;
2503 sctp_service_reassembly(stcb, asoc);
2504 if (asoc->fragmented_delivery_inprogress == 0) {
2511 extern int sctp_strict_data_order;
2514 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2515 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2516 struct sctp_nets *net, uint32_t * high_tsn)
2518 struct sctp_data_chunk *ch, chunk_buf;
2519 struct sctp_association *asoc;
2520 int num_chunks = 0; /* number of data chunks processed */
2522 int chk_length, break_flag, last_chunk;
2523 int abort_flag = 0, was_a_gap = 0;
2527 sctp_set_rwnd(stcb, &stcb->asoc);
2530 SCTP_TCB_LOCK_ASSERT(stcb);
2532 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
2533 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
2534 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
2536 * wait a minute, this guy is gone, there is no longer a
2537 * receiver. Send peer an ABORT!
2539 struct mbuf *op_err;
2541 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2542 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
2545 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2546 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2547 /* there was a gap before this data was processed */
2551 * setup where we got the last DATA packet from for any SACK that
2552 * may need to go out. Don't bump the net. This is done ONLY when a
2553 * chunk is assigned.
2555 asoc->last_data_chunk_from = net;
2558 * Now before we proceed we must figure out if this is a wasted
2559 * cluster... i.e. it is a small packet sent in and yet the driver
2560 * underneath allocated a full cluster for it. If so we must copy it
2561 * to a smaller mbuf and free up the cluster mbuf. This will help
2562 * with cluster starvation.
2564 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2565 /* we only handle mbufs that are singletons.. not chains */
2566 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2568 /* ok lets see if we can copy the data up */
2571 /* get the pointers and copy */
2572 to = mtod(m, caddr_t *);
2573 from = mtod((*mm), caddr_t *);
2574 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2575 /* copy the length and free up the old */
2576 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2578 /* success, back copy */
2581 /* We are in trouble in the mbuf world .. yikes */
2585 /* get pointer to the first chunk header */
2586 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2587 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2592 * process all DATA chunks...
2594 *high_tsn = asoc->cumulative_tsn;
2596 while (stop_proc == 0) {
2597 /* validate chunk length */
2598 chk_length = ntohs(ch->ch.chunk_length);
2599 if (length - *offset < chk_length) {
2600 /* all done, mutilated chunk */
2604 if (ch->ch.chunk_type == SCTP_DATA) {
2605 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2607 * Need to send an abort since we had an
2608 * invalid data chunk.
2610 struct mbuf *op_err;
2612 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2613 0, M_DONTWAIT, 1, MT_DATA);
2616 struct sctp_paramhdr *ph;
2619 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2620 (2 * sizeof(uint32_t));
2621 ph = mtod(op_err, struct sctp_paramhdr *);
2623 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2624 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2625 ippp = (uint32_t *) (ph + 1);
2626 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2628 *ippp = asoc->cumulative_tsn;
2631 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2632 sctp_abort_association(inp, stcb, m, iphlen, sh,
2636 #ifdef SCTP_AUDITING_ENABLED
2637 sctp_audit_log(0xB1, 0);
2639 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2644 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2645 chk_length, net, high_tsn, &abort_flag, &break_flag,
2654 * Set because we ran out of rwnd space and have no
2655 * drop-report space left.
2661 /* not a data chunk in the data region */
2662 switch (ch->ch.chunk_type) {
2663 case SCTP_INITIATION:
2664 case SCTP_INITIATION_ACK:
2665 case SCTP_SELECTIVE_ACK:
2666 case SCTP_HEARTBEAT_REQUEST:
2667 case SCTP_HEARTBEAT_ACK:
2668 case SCTP_ABORT_ASSOCIATION:
2670 case SCTP_SHUTDOWN_ACK:
2671 case SCTP_OPERATION_ERROR:
2672 case SCTP_COOKIE_ECHO:
2673 case SCTP_COOKIE_ACK:
2676 case SCTP_SHUTDOWN_COMPLETE:
2677 case SCTP_AUTHENTICATION:
2678 case SCTP_ASCONF_ACK:
2679 case SCTP_PACKET_DROPPED:
2680 case SCTP_STREAM_RESET:
2681 case SCTP_FORWARD_CUM_TSN:
2684 * Now, what do we do with KNOWN chunks that
2685 * are NOT in the right place?
2687 * For now, I do nothing but ignore them. We
2688 * may later want to add sysctl stuff to
2689 * switch out and do either an ABORT() or
2690 * possibly process them.
2692 if (sctp_strict_data_order) {
2693 struct mbuf *op_err;
2695 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2696 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
2701 /* unknown chunk type, use bit rules */
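/*
 * Per RFC 2960, section 3.2, the two high-order bits of an
 * unrecognized chunk type encode the required action: bit 0x40
 * set means report the chunk in an ERROR with cause
 * "Unrecognized Chunk Type", and bit 0x80 set means skip the
 * chunk and keep processing the packet (clear means stop and
 * discard the rest).
 */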
2702 if (ch->ch.chunk_type & 0x40) {
2703 /* Add a error report to the queue */
2705 struct sctp_paramhdr *phd;
2707 mm = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2709 phd = mtod(mm, struct sctp_paramhdr *);
2711 * We cheat and use param
2712 * type since we did not
2713 * bother to define an error
2714 * cause struct. They are
2715 * the same basic format
2716 * with different names.
2719 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2721 htons(chk_length + sizeof(*phd));
2722 SCTP_BUF_LEN(mm) = sizeof(*phd);
2723 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset,
2724 SCTP_SIZE32(chk_length),
2726 if (SCTP_BUF_NEXT(mm)) {
2727 sctp_queue_op_err(stcb, mm);
2733 if ((ch->ch.chunk_type & 0x80) == 0) {
2734 /* discard the rest of this packet */
2736 } /* else skip this bad chunk and continue */
2739 }; /* switch of chunk type */
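/*
 * Chunks are 32-bit aligned on the wire (RFC 2960, section 3.2),
 * so advance by the chunk length rounded up to a multiple of 4;
 * e.g. a 17-byte chunk occupies 20 bytes of the packet.
 */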
2741 *offset += SCTP_SIZE32(chk_length);
2742 if ((*offset >= length) || stop_proc) {
2743 /* no more data left in the mbuf chain */
2747 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2748 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2758 * we need to report rwnd overrun drops.
2760 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2764 * Did we get data? If so, update the time for auto-close and
2765 * give the peer credit for being alive.
2767 SCTP_STAT_INCR(sctps_recvpktwithdata);
2768 stcb->asoc.overall_error_count = 0;
2769 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2771 /* now service all of the reassm queue if needed */
2772 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2773 sctp_service_queues(stcb, asoc);
2775 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2777 * Assure that we ack right away by making sure that a d-ack
2778 * timer is running, so the sack_check will send a sack.
2780 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2783 /* Start a sack timer or QUEUE a SACK for sending */
2784 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
2785 (stcb->asoc.first_ack_sent)) {
2786 /* Everything is in order */
2787 if (stcb->asoc.mapping_array[0] == 0xff) {
2788 /* need to do the slide */
2789 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2791 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2792 stcb->asoc.first_ack_sent = 1;
2793 SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2794 sctp_send_sack(stcb);
2796 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2797 stcb->sctp_ep, stcb, NULL);
2801 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2810 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2811 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2812 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2813 int num_seg, int *ecn_seg_sums)
2815 /************************************************/
2816 /* process fragments and update sendqueue */
2817 /************************************************/
2818 struct sctp_sack *sack;
2819 struct sctp_gap_ack_block *frag;
2820 struct sctp_tmit_chunk *tp1;
2824 #ifdef SCTP_FR_LOGGING
2828 uint16_t frag_strt, frag_end, primary_flag_set;
2829 u_long last_frag_high;
2832 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
2834 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2835 primary_flag_set = 1;
2837 primary_flag_set = 0;
2841 frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
2842 sizeof(struct sctp_sack));
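/*
 * Gap-ack blocks carry start/end offsets relative to the SACK's
 * cumulative TSN.  For example, with last_tsn = 100, a block of
 * start 3 and end 5 acknowledges TSNs 103 through 105, exactly
 * the range the inner for-loop below walks.
 */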
2845 for (i = 0; i < num_seg; i++) {
2846 frag_strt = ntohs(frag->start);
2847 frag_end = ntohs(frag->end);
2848 /* some sanity checks on the fragment offsets */
2849 if (frag_strt > frag_end) {
2850 /* this one is malformed, skip */
2854 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
2856 *biggest_tsn_acked = frag_end + last_tsn;
2858 /* mark acked dgs and find out the highest TSN being acked */
2860 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2862 /* save the locations of the last frags */
2863 last_frag_high = frag_end + last_tsn;
2866 * now let's see if we need to reset the queue due to
2867 * an out-of-order SACK fragment
2869 if (compare_with_wrap(frag_strt + last_tsn,
2870 last_frag_high, MAX_TSN)) {
2872 * if the new frag starts after the last TSN
2873 * frag covered, we are ok and this one is
2874 * beyond the last one
2879 * ok, they have reset us, so we need to
2880 * reset the queue. This will cause extra
2881 * hunting, but hey, they chose the
2882 * performance hit when they failed to order their gaps.
2885 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2887 last_frag_high = frag_end + last_tsn;
2889 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2891 #ifdef SCTP_FR_LOGGING
2892 if (tp1->rec.data.doing_fast_retransmit)
2897 * CMT: CUCv2 algorithm. For each TSN being
2898 * processed from the sent queue, track the
2899 * next expected pseudo-cumack, or
2900 * rtx_pseudo_cumack, if required. Separate
2901 * cumack trackers for first transmissions,
2902 * and retransmissions.
2904 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2905 (tp1->snd_count == 1)) {
2906 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2907 tp1->whoTo->find_pseudo_cumack = 0;
2909 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2910 (tp1->snd_count > 1)) {
2911 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2912 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2914 if (tp1->rec.data.TSN_seq == j) {
2915 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2917 * must be held until cum-ack passes
2921 * ECN Nonce: Add the nonce
2922 * value to the sender's nonce sum
2925 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2938 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2939 *biggest_newly_acked_tsn, MAX_TSN)) {
2940 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2949 * this_sack_highest_newack if appropriate.
2953 if (tp1->rec.data.chunk_was_revoked == 0)
2954 tp1->whoTo->saw_newack = 1;
2956 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2957 tp1->whoTo->this_sack_highest_newack,
2959 tp1->whoTo->this_sack_highest_newack =
2960 tp1->rec.data.TSN_seq;
2965 * this_sack_lowest_newack
2968 if (*this_sack_lowest_newack == 0) {
2969 #ifdef SCTP_SACK_LOGGING
2970 sctp_log_sack(*this_sack_lowest_newack,
2972 tp1->rec.data.TSN_seq,
2975 SCTP_LOG_TSN_ACKED);
2977 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2982 * CMT CUCv2 algorithm: if the (rtx-)pseudo-cumack for the
2987 * corresponding dest is being acked, we have a new
2989 * (rtx-)pseudo-cumack and set new_(rtx_)pseudo_cumack so that
2997 * the cwnd for this dest can be updated; also start the search for the next expected (rtx-)pseudo-cumack.
3005 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3006 if (tp1->rec.data.chunk_was_revoked == 0) {
3007 tp1->whoTo->new_pseudo_cumack = 1;
3009 tp1->whoTo->find_pseudo_cumack = 1;
3011 #ifdef SCTP_CWND_LOGGING
3012 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3014 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3015 if (tp1->rec.data.chunk_was_revoked == 0) {
3016 tp1->whoTo->new_pseudo_cumack = 1;
3018 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3020 #ifdef SCTP_SACK_LOGGING
3021 sctp_log_sack(*biggest_newly_acked_tsn,
3023 tp1->rec.data.TSN_seq,
3026 SCTP_LOG_TSN_ACKED);
3028 #ifdef SCTP_FLIGHT_LOGGING
3029 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
3030 tp1->whoTo->flight_size,
3033 tp1->rec.data.TSN_seq);
3035 if (tp1->whoTo->flight_size >= tp1->book_size)
3036 tp1->whoTo->flight_size -= tp1->book_size;
3038 tp1->whoTo->flight_size = 0;
3039 if (asoc->total_flight >= tp1->book_size) {
3040 asoc->total_flight -= tp1->book_size;
3041 if (asoc->total_flight_count > 0)
3042 asoc->total_flight_count--;
3044 asoc->total_flight = 0;
3045 asoc->total_flight_count = 0;
3048 tp1->whoTo->net_ack += tp1->send_size;
3050 if (tp1->snd_count < 2) {
3056 tp1->whoTo->net_ack2 += tp1->send_size;
3063 sctp_calculate_rto(stcb,
3066 &tp1->sent_rcv_time);
3067 tp1->whoTo->rto_pending = 0;
3072 if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
3073 tp1->sent != SCTP_DATAGRAM_UNSENT &&
3074 compare_with_wrap(tp1->rec.data.TSN_seq,
3075 asoc->this_sack_highest_gap,
3077 asoc->this_sack_highest_gap =
3078 tp1->rec.data.TSN_seq;
3080 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3081 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3082 #ifdef SCTP_AUDITING_ENABLED
3083 sctp_audit_log(0xB2,
3084 (asoc->sent_queue_retran_cnt & 0x000000ff));
3088 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3089 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3091 tp1->sent = SCTP_DATAGRAM_MARKED;
3094 } /* if (tp1->TSN_seq == j) */
3095 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
3099 tp1 = TAILQ_NEXT(tp1, sctp_next);
3100 } /* end while (tp1) */
3101 } /* end for (j = fragStart */
3102 frag++; /* next one */
3104 #ifdef SCTP_FR_LOGGING
3106 * if (num_frs) sctp_log_fr(*biggest_tsn_acked,
3107 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3113 sctp_check_for_revoked(struct sctp_association *asoc, uint32_t cumack,
3114 u_long biggest_tsn_acked)
3116 struct sctp_tmit_chunk *tp1;
3117 int tot_revoked = 0;
3119 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3121 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3124 * ok this guy is either ACKED or MARKED. If it is
3125 * ACKED it has been previously acked but not this
3126 * time, i.e. revoked. If it is MARKED it was ACK'ed again.
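/*
 * Revocation example (hypothetical TSNs): SACK #1 gap-acks TSN
 * 105, so it sits here as ACKED.  SACK #2 then arrives with
 * cum-ack 102 and no gap block covering 105: the peer has
 * revoked 105, and it must go back to SENT and count as in
 * flight again.
 */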
3129 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3130 /* it has been revoked */
3131 tp1->sent = SCTP_DATAGRAM_SENT;
3132 tp1->rec.data.chunk_was_revoked = 1;
3134 * We must add this stuff back in to assure
3135 * timers and such get started.
3137 tp1->whoTo->flight_size += tp1->book_size;
3138 asoc->total_flight_count++;
3139 asoc->total_flight += tp1->book_size;
3141 #ifdef SCTP_SACK_LOGGING
3142 sctp_log_sack(asoc->last_acked_seq,
3144 tp1->rec.data.TSN_seq,
3147 SCTP_LOG_TSN_REVOKED);
3149 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3150 /* it has been re-acked in this SACK */
3151 tp1->sent = SCTP_DATAGRAM_ACKED;
3154 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3156 tp1 = TAILQ_NEXT(tp1, sctp_next);
3158 if (tot_revoked > 0) {
3160 * Setup the ecn nonce re-sync point. We do this since once
3161 * data is revoked we begin to retransmit things, which do
3162 * NOT have the ECN bits set. This means we are now out of
3163 * sync and must wait until we get back in sync with the
3164 * peer to check ECN bits.
3166 tp1 = TAILQ_FIRST(&asoc->send_queue);
3168 asoc->nonce_resync_tsn = asoc->sending_seq;
3170 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3172 asoc->nonce_wait_for_ecne = 0;
3173 asoc->nonce_sum_check = 0;
3177 extern int sctp_peer_chunk_oh;
3180 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3181 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3183 struct sctp_tmit_chunk *tp1;
3184 int strike_flag = 0;
3186 int tot_retrans = 0;
3187 uint32_t sending_seq;
3188 struct sctp_nets *net;
3189 int num_dests_sacked = 0;
3192 * select the sending_seq, this is either the next thing ready to be
3193 * sent but not transmitted, OR, the next seq we assign.
3195 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3197 sending_seq = asoc->sending_seq;
3199 sending_seq = tp1->rec.data.TSN_seq;
3202 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3203 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3204 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3205 if (net->saw_newack)
3209 if (stcb->asoc.peer_supports_prsctp) {
3210 SCTP_GETTIME_TIMEVAL(&now);
3212 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3215 if (tp1->no_fr_allowed) {
3216 /* this one had a timeout or something */
3217 tp1 = TAILQ_NEXT(tp1, sctp_next);
3220 #ifdef SCTP_FR_LOGGING
3221 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3222 sctp_log_fr(biggest_tsn_newly_acked,
3223 tp1->rec.data.TSN_seq,
3225 SCTP_FR_LOG_CHECK_STRIKE);
3227 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3229 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3233 if (stcb->asoc.peer_supports_prsctp) {
3234 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3235 /* Is it expired? */
3236 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3237 /* Yes so drop it */
3238 if (tp1->data != NULL) {
3239 sctp_release_pr_sctp_chunk(stcb, tp1,
3240 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3243 tp1 = TAILQ_NEXT(tp1, sctp_next);
3247 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3248 /* Has it been retransmitted tv_sec times? */
3249 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3250 /* Yes, so drop it */
3251 if (tp1->data != NULL) {
3252 sctp_release_pr_sctp_chunk(stcb, tp1,
3253 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3256 tp1 = TAILQ_NEXT(tp1, sctp_next);
3261 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3262 asoc->this_sack_highest_gap, MAX_TSN)) {
3263 /* we are beyond the tsn in the sack */
3266 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3267 /* either a RESEND, ACKED, or MARKED */
3269 tp1 = TAILQ_NEXT(tp1, sctp_next);
3273 * CMT : SFR algo (covers part of DAC and HTNA as well)
3275 if (tp1->whoTo->saw_newack == 0) {
3277 * No new acks were received for data sent to this
3278 * dest. Therefore, according to the SFR algo for
3279 * CMT, no data sent to this dest can be marked for
3280 * FR using this SACK. (iyengar@cis.udel.edu,
3283 tp1 = TAILQ_NEXT(tp1, sctp_next);
3285 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3286 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3288 * CMT: New acks were received for data sent to
3289 * this dest. But no new acks were seen for data
3290 * sent after tp1. Therefore, according to the SFR
3291 * algo for CMT, tp1 cannot be marked for FR using
3292 * this SACK. This step covers part of the DAC algo
3293 * and the HTNA algo as well.
3295 tp1 = TAILQ_NEXT(tp1, sctp_next);
3299 * Here we check to see if we have already done a FR
3300 * and if so we see if the biggest TSN we saw in the sack is
3301 * smaller than the recovery point. If so we don't strike
3302 * the tsn... otherwise we CAN strike the TSN.
3305 * @@@ JRI: Check for CMT
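/*
 * For example (hypothetical TSNs): if sending_seq was 1000 when
 * this chunk's FR was done, fast_retran_tsn records that point;
 * a later SACK whose biggest newly acked TSN is 1005 may strike
 * the chunk again, while one topping out at 998 may not, since
 * that ack predates the recovery point.
 */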
3307 if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3309 * Strike the TSN if in fast-recovery and cum-ack
3312 #ifdef SCTP_FR_LOGGING
3313 sctp_log_fr(biggest_tsn_newly_acked,
3314 tp1->rec.data.TSN_seq,
3316 SCTP_FR_LOG_STRIKE_CHUNK);
3319 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3321 * CMT DAC algorithm: If SACK flag is set to
3322 * 0, then lowest_newack test will not pass
3323 * because it would have been set to the
3324 * cumack earlier. If not already to be
3325 * rtx'd, if not a mixed sack and if tp1 is
3326 * not between two sacked TSNs, then mark by one more.
3329 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3330 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3331 #ifdef SCTP_FR_LOGGING
3332 sctp_log_fr(16 + num_dests_sacked,
3333 tp1->rec.data.TSN_seq,
3335 SCTP_FR_LOG_STRIKE_CHUNK);
3340 } else if (tp1->rec.data.doing_fast_retransmit) {
3342 * For those that have done a FR we must take
3343 * special consideration if we strike. I.e. the
3344 * biggest_newly_acked must be higher than the
3345 * sending_seq at the time we did the FR.
3347 #ifdef SCTP_FR_TO_ALTERNATE
3349 * If FR's go to new networks, then we must only do
3350 * this for singly homed asoc's. However if the FR's
3351 * go to the same network (Armando's work) then it's
3352 * ok to FR multiple times.
3354 if (asoc->numnets < 2)
3359 if ((compare_with_wrap(biggest_tsn_newly_acked,
3360 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3361 (biggest_tsn_newly_acked ==
3362 tp1->rec.data.fast_retran_tsn)) {
3364 * Strike the TSN, since this ack is
3365 * beyond where things were when we did the FR.
3368 #ifdef SCTP_FR_LOGGING
3369 sctp_log_fr(biggest_tsn_newly_acked,
3370 tp1->rec.data.TSN_seq,
3372 SCTP_FR_LOG_STRIKE_CHUNK);
3376 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3378 * CMT DAC algorithm: If
3379 * SACK flag is set to 0,
3380 * then lowest_newack test
3381 * will not pass because it
3382 * would have been set to
3383 * the cumack earlier. If
3384 * not already to be rtx'd,
3385 * if not a mixed sack and
3386 * if tp1 is not between two
3387 * sacked TSNs, then mark by one more.
3390 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3391 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3392 #ifdef SCTP_FR_LOGGING
3393 sctp_log_fr(32 + num_dests_sacked,
3394 tp1->rec.data.TSN_seq,
3396 SCTP_FR_LOG_STRIKE_CHUNK);
3404 * @@@ JRI: TODO: remove code for HTNA algo. CMT's
3405 * SFR algo covers HTNA.
3407 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3408 biggest_tsn_newly_acked, MAX_TSN)) {
3410 * We don't strike these: this is the HTNA
3411 * algorithm, i.e. we don't strike if our TSN is
3412 * larger than the Highest TSN Newly Acked.
3416 /* Strike the TSN */
3417 #ifdef SCTP_FR_LOGGING
3418 sctp_log_fr(biggest_tsn_newly_acked,
3419 tp1->rec.data.TSN_seq,
3421 SCTP_FR_LOG_STRIKE_CHUNK);
3424 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3426 * CMT DAC algorithm: If SACK flag is set to
3427 * 0, then lowest_newack test will not pass
3428 * because it would have been set to the
3429 * cumack earlier. If not already to be
3430 * rtx'd, if not a mixed sack and if tp1 is
3431 * not between two sacked TSNs, then mark by one more.
3434 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3435 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3436 #ifdef SCTP_FR_LOGGING
3437 sctp_log_fr(48 + num_dests_sacked,
3438 tp1->rec.data.TSN_seq,
3440 SCTP_FR_LOG_STRIKE_CHUNK);
3446 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3447 /* Increment the count to resend */
3448 struct sctp_nets *alt;
3450 /* printf("OK, we are now ready to FR this guy\n"); */
3451 #ifdef SCTP_FR_LOGGING
3452 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3456 /* This is a subsequent FR */
3457 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3459 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3461 if (sctp_cmt_on_off) {
3463 * CMT: Using RTX_SSTHRESH policy for CMT.
3464 * If CMT is being used, then pick dest with
3465 * largest ssthresh for any retransmission.
3466 * (iyengar@cis.udel.edu, 2005/08/12)
3468 tp1->no_fr_allowed = 1;
3470 alt = sctp_find_alternate_net(stcb, alt, 1);
3472 * CUCv2: If a different dest is picked for
3473 * the retransmission, then new
3474 * (rtx-)pseudo_cumack needs to be tracked
3475 * for orig dest. Let CUCv2 track new (rtx-)
3476 * pseudo-cumack always.
3478 tp1->whoTo->find_pseudo_cumack = 1;
3479 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3482 } else {/* CMT is OFF */
3484 #ifdef SCTP_FR_TO_ALTERNATE
3485 /* Can we find an alternate? */
3486 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3489 * default behavior is to NOT retransmit
3490 * FR's to an alternate. Armando Caro's
3491 * paper details why.
3497 tp1->rec.data.doing_fast_retransmit = 1;
3499 /* mark the sending seq for possible subsequent FR's */
3501 * printf("Marking TSN for FR new value %x\n",
3502 * (uint32_t)tpi->rec.data.TSN_seq);
3504 if (TAILQ_EMPTY(&asoc->send_queue)) {
3506 * If the send queue is empty then it's
3507 * the next sequence number that will be
3508 * assigned, so we subtract one from this to
3509 * get the one we last sent.
3511 tp1->rec.data.fast_retran_tsn = sending_seq;
3514 * If there are chunks on the send queue
3515 * (unsent data that has made it from the
3516 * stream queues but not out the door), we
3517 * take the first one (which will have the
3518 * lowest TSN) and subtract one to get the one we last sent.
3521 struct sctp_tmit_chunk *ttt;
3523 ttt = TAILQ_FIRST(&asoc->send_queue);
3524 tp1->rec.data.fast_retran_tsn =
3525 ttt->rec.data.TSN_seq;
3530 * this guy had an RTO calculation pending on it; cancel it.
3533 tp1->whoTo->rto_pending = 0;
3536 /* fix counts and things */
3537 #ifdef SCTP_FLIGHT_LOGGING
3538 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
3539 tp1->whoTo->flight_size,
3542 tp1->rec.data.TSN_seq);
3544 tp1->whoTo->net_ack++;
3545 if (tp1->whoTo->flight_size >= tp1->book_size)
3546 tp1->whoTo->flight_size -= tp1->book_size;
3548 tp1->whoTo->flight_size = 0;
3550 #ifdef SCTP_LOG_RWND
3551 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3552 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
3554 /* add back to the rwnd */
3555 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3557 /* remove from the total flight */
3558 if (asoc->total_flight >= tp1->book_size) {
3559 asoc->total_flight -= tp1->book_size;
3560 if (asoc->total_flight_count > 0)
3561 asoc->total_flight_count--;
3563 asoc->total_flight = 0;
3564 asoc->total_flight_count = 0;
3568 if (alt != tp1->whoTo) {
3569 /* yes, there is an alternate. */
3570 sctp_free_remote_addr(tp1->whoTo);
3572 atomic_add_int(&alt->ref_count, 1);
3575 tp1 = TAILQ_NEXT(tp1, sctp_next);
3578 if (tot_retrans > 0) {
3580 * Setup the ecn nonce re-sync point. We do this since once
3581 * we FR something, we introduce a Karn's rule scenario
3582 * and won't know the totals for the ECN bits.
3584 asoc->nonce_resync_tsn = sending_seq;
3585 asoc->nonce_wait_for_ecne = 0;
3586 asoc->nonce_sum_check = 0;
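/*
 * PR-SCTP (RFC 3758): walk the sent queue from the front and try
 * to advance the "advanced peer ack point" past abandoned chunks,
 * so that a FORWARD-TSN can tell the peer to skip them.  Hands
 * back (a_adv) the chunk the point advanced to, if any.
 */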
3590 struct sctp_tmit_chunk *
3591 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3592 struct sctp_association *asoc)
3594 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3598 if (asoc->peer_supports_prsctp == 0) {
3601 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3603 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3604 tp1->sent != SCTP_DATAGRAM_RESEND) {
3605 /* no chance to advance, out of here */
3608 if (!PR_SCTP_ENABLED(tp1->flags)) {
3610 * We can't fwd-tsn past any that are reliable, aka
3611 * ones that will be retransmitted until the asoc fails.
3616 SCTP_GETTIME_TIMEVAL(&now);
3619 tp2 = TAILQ_NEXT(tp1, sctp_next);
3621 * now we got a chunk which is marked for another
3622 * retransmission to a PR-stream but has run out of its chances
3623 * already, or maybe has been marked to skip now. Can we skip
3624 * it if it's a resend?
3626 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3627 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3629 * Now, is this one marked for resend and is its time now expired?
3632 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3633 /* Yes so drop it */
3635 sctp_release_pr_sctp_chunk(stcb, tp1,
3636 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3641 * No, we are done when we hit one marked for resend
3642 * whose time has not expired.
3648 * Ok now if this chunk is marked to drop it we can clean up
3649 * the chunk, advance our peer ack point, and check the next chunk.
3652 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3653 /* advance PeerAckPoint goes forward */
3654 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3657 * we don't want to de-queue it here. Just wait for
3658 * the next peer SACK to come with a new cumTSN and
3659 * then the chunk will be dropped in the normal fashion.
3663 sctp_free_bufspace(stcb, asoc, tp1, 1);
3665 * Maybe there should be another notification type for this
3668 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3669 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3671 sctp_m_freem(tp1->data);
3673 if (stcb->sctp_socket) {
3674 sctp_sowwakeup(stcb->sctp_ep,
3676 #ifdef SCTP_WAKE_LOGGING
3677 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
3683 * If it is still in RESEND we can advance no further.
3689 * If we hit here we just dumped tp1; move to the next tsn on the sent queue.
3697 #ifdef SCTP_HIGH_SPEED
3698 struct sctp_hs_raise_drop {
3701 int32_t drop_percent;
3704 #define SCTP_HS_TABLE_SIZE 73
3706 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3707 {38, 1, 50}, /* 0 */
3708 {118, 2, 44}, /* 1 */
3709 {221, 3, 41}, /* 2 */
3710 {347, 4, 38}, /* 3 */
3711 {495, 5, 37}, /* 4 */
3712 {663, 6, 35}, /* 5 */
3713 {851, 7, 34}, /* 6 */
3714 {1058, 8, 33}, /* 7 */
3715 {1284, 9, 32}, /* 8 */
3716 {1529, 10, 31}, /* 9 */
3717 {1793, 11, 30}, /* 10 */
3718 {2076, 12, 29}, /* 11 */
3719 {2378, 13, 28}, /* 12 */
3720 {2699, 14, 28}, /* 13 */
3721 {3039, 15, 27}, /* 14 */
3722 {3399, 16, 27}, /* 15 */
3723 {3778, 17, 26}, /* 16 */
3724 {4177, 18, 26}, /* 17 */
3725 {4596, 19, 25}, /* 18 */
3726 {5036, 20, 25}, /* 19 */
3727 {5497, 21, 24}, /* 20 */
3728 {5979, 22, 24}, /* 21 */
3729 {6483, 23, 23}, /* 22 */
3730 {7009, 24, 23}, /* 23 */
3731 {7558, 25, 22}, /* 24 */
3732 {8130, 26, 22}, /* 25 */
3733 {8726, 27, 22}, /* 26 */
3734 {9346, 28, 21}, /* 27 */
3735 {9991, 29, 21}, /* 28 */
3736 {10661, 30, 21}, /* 29 */
3737 {11358, 31, 20}, /* 30 */
3738 {12082, 32, 20}, /* 31 */
3739 {12834, 33, 20}, /* 32 */
3740 {13614, 34, 19}, /* 33 */
3741 {14424, 35, 19}, /* 34 */
3742 {15265, 36, 19}, /* 35 */
3743 {16137, 37, 19}, /* 36 */
3744 {17042, 38, 18}, /* 37 */
3745 {17981, 39, 18}, /* 38 */
3746 {18955, 40, 18}, /* 39 */
3747 {19965, 41, 17}, /* 40 */
3748 {21013, 42, 17}, /* 41 */
3749 {22101, 43, 17}, /* 42 */
3750 {23230, 44, 17}, /* 43 */
3751 {24402, 45, 16}, /* 44 */
3752 {25618, 46, 16}, /* 45 */
3753 {26881, 47, 16}, /* 46 */
3754 {28193, 48, 16}, /* 47 */
3755 {29557, 49, 15}, /* 48 */
3756 {30975, 50, 15}, /* 49 */
3757 {32450, 51, 15}, /* 50 */
3758 {33986, 52, 15}, /* 51 */
3759 {35586, 53, 14}, /* 52 */
3760 {37253, 54, 14}, /* 53 */
3761 {38992, 55, 14}, /* 54 */
3762 {40808, 56, 14}, /* 55 */
3763 {42707, 57, 13}, /* 56 */
3764 {44694, 58, 13}, /* 57 */
3765 {46776, 59, 13}, /* 58 */
3766 {48961, 60, 13}, /* 59 */
3767 {51258, 61, 13}, /* 60 */
3768 {53677, 62, 12}, /* 61 */
3769 {56230, 63, 12}, /* 62 */
3770 {58932, 64, 12}, /* 63 */
3771 {61799, 65, 12}, /* 64 */
3772 {64851, 66, 11}, /* 65 */
3773 {68113, 67, 11}, /* 66 */
3774 {71617, 68, 11}, /* 67 */
3775 {75401, 69, 10}, /* 68 */
3776 {79517, 70, 10}, /* 69 */
3777 {84035, 71, 10}, /* 70 */
3778 {89053, 72, 10}, /* 71 */
3779 {94717, 73, 9} /* 72 */
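/*
 * Each row above is (cwnd threshold in KB, slow-start increase in
 * KB, drop percent on loss), in the spirit of HighSpeed TCP (RFC
 * 3649).  cwnd >> 10 expresses cwnd in KB for the row lookup; on a
 * loss, e.g. a row with drop_percent 33 keeps roughly 67% of cwnd
 * (ssthresh = cwnd - 33% of cwnd) instead of the standard halving.
 */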
3783 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
3785 int cur_val, i, indx, incr;
3787 cur_val = net->cwnd >> 10;
3788 indx = SCTP_HS_TABLE_SIZE - 1;
3790 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3792 if (net->net_ack > net->mtu) {
3793 net->cwnd += net->mtu;
3794 #ifdef SCTP_CWND_MONITOR
3795 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3798 net->cwnd += net->net_ack;
3799 #ifdef SCTP_CWND_MONITOR
3800 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3804 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
3805 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3810 net->last_hs_used = indx;
3811 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3813 #ifdef SCTP_CWND_MONITOR
3814 sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
3820 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
3822 int cur_val, i, indx;
3824 #ifdef SCTP_CWND_MONITOR
3825 int old_cwnd = net->cwnd;
3829 cur_val = net->cwnd >> 10;
3830 indx = net->last_hs_used;
3831 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3833 net->ssthresh = net->cwnd / 2;
3834 if (net->ssthresh < (net->mtu * 2)) {
3835 net->ssthresh = 2 * net->mtu;
3837 net->cwnd = net->ssthresh;
3839 /* drop by the proper amount */
3840 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3841 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3842 net->cwnd = net->ssthresh;
3843 /* now where are we */
3844 indx = net->last_hs_used;
3845 cur_val = net->cwnd >> 10;
3846 /* reset where we are in the table */
3847 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3848 /* fell out of hs */
3849 net->last_hs_used = 0;
3851 for (i = indx; i >= 1; i--) {
3852 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3856 net->last_hs_used = indx;
3859 #ifdef SCTP_CWND_MONITOR
3860 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
3867 extern int sctp_early_fr;
3868 extern int sctp_L2_abc_variable;
3871 static __inline void
3872 sctp_cwnd_update(struct sctp_tcb *stcb,
3873 struct sctp_association *asoc,
3874 int accum_moved, int reneged_all, int will_exit)
3876 struct sctp_nets *net;
3878 /******************************/
3879 /* update cwnd and Early FR */
3880 /******************************/
3881 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3882 #ifdef JANA_CODE_WHY_THIS
3884 * CMT fast recovery code. Need to debug.
3886 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
3887 if (compare_with_wrap(asoc->last_acked_seq,
3888 net->fast_recovery_tsn, MAX_TSN) ||
3889 (asoc->last_acked_seq == net->fast_recovery_tsn) ||
3890 compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
3891 (net->pseudo_cumack == net->fast_recovery_tsn)) {
3892 net->will_exit_fast_recovery = 1;
3896 if (sctp_early_fr) {
3898 * So, first of all, do we need to have an Early FR timer running?
3901 if (((TAILQ_FIRST(&asoc->sent_queue)) &&
3902 (net->ref_count > 1) &&
3903 (net->flight_size < net->cwnd)) ||
3906 * yes, so in this case stop it if it's
3907 * running, and then restart it. Reneging
3908 * all is a special case where we want to
3909 * run the Early FR timer and then force the
3910 * last few unacked to be sent, causing us
3911 * to elicit a sack with gaps to force out the others.
3914 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
3915 SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
3916 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
3917 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
3919 SCTP_STAT_INCR(sctps_earlyfrstrid);
3920 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
3922 /* No, stop it if its running */
3923 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
3924 SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
3925 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
3926 SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
3930 /* if nothing was acked on this destination skip it */
3931 if (net->net_ack == 0) {
3932 #ifdef SCTP_CWND_LOGGING
3933 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
3937 if (net->net_ack2 > 0) {
3939 * Karn's rule applies to clearing the error count; this is optional.
3942 net->error_count = 0;
3943 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
3944 SCTP_ADDR_NOT_REACHABLE) {
3945 /* addr came good */
3946 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3947 net->dest_state |= SCTP_ADDR_REACHABLE;
3948 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3949 SCTP_RECEIVED_SACK, (void *)net);
3950 /* now was it the primary? if so restore */
3951 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3952 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3956 #ifdef JANA_CODE_WHY_THIS
3958 * Cannot skip for CMT. Need to come back and check these
3959 * variables for CMT. CMT fast recovery code. Need to debug.
3961 if (sctp_cmt_on_off == 1 &&
3962 net->fast_retran_loss_recovery &&
3963 net->will_exit_fast_recovery == 0)
3965 if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
3967 * If we are in loss recovery we skip any cwnd update.
3970 goto skip_cwnd_update;
3973 * CMT: CUC algorithm. Update cwnd if the pseudo-cumack has moved.
3976 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
3977 /* If the cumulative ack moved we can proceed */
3978 if (net->cwnd <= net->ssthresh) {
3979 /* We are in slow start */
3980 if (net->flight_size + net->net_ack >=
3982 #ifdef SCTP_HIGH_SPEED
3983 sctp_hs_cwnd_increase(stcb, net);
3985 if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
3986 net->cwnd += (net->mtu * sctp_L2_abc_variable);
3987 #ifdef SCTP_CWND_MONITOR
3988 sctp_log_cwnd(stcb, net, net->mtu,
3989 SCTP_CWND_LOG_FROM_SS);
3993 net->cwnd += net->net_ack;
3994 #ifdef SCTP_CWND_MONITOR
3995 sctp_log_cwnd(stcb, net, net->net_ack,
3996 SCTP_CWND_LOG_FROM_SS);
4004 dif = net->cwnd - (net->flight_size +
4006 #ifdef SCTP_CWND_LOGGING
4007 sctp_log_cwnd(stcb, net, net->net_ack,
4008 SCTP_CWND_LOG_NOADV_SS);
4012 /* We are in congestion avoidance */
4013 if (net->flight_size + net->net_ack >=
4016 * add to pba only if we had a
4017 * cwnd's worth (or so) in flight OR
4018 * the burst limit was applied.
4020 net->partial_bytes_acked +=
4024 * Do we need to increase (if pba is > cwnd)?
4027 if (net->partial_bytes_acked >=
4030 net->partial_bytes_acked) {
4031 net->partial_bytes_acked -=
4034 net->partial_bytes_acked =
4037 net->cwnd += net->mtu;
4038 #ifdef SCTP_CWND_MONITOR
4039 sctp_log_cwnd(stcb, net, net->mtu,
4040 SCTP_CWND_LOG_FROM_CA);
4043 #ifdef SCTP_CWND_LOGGING
4045 sctp_log_cwnd(stcb, net, net->net_ack,
4046 SCTP_CWND_LOG_NOADV_CA);
4052 #ifdef SCTP_CWND_LOGGING
4053 sctp_log_cwnd(stcb, net, net->net_ack,
4054 SCTP_CWND_LOG_NOADV_CA);
4056 dif = net->cwnd - (net->flight_size +
4061 #ifdef SCTP_CWND_LOGGING
4062 sctp_log_cwnd(stcb, net, net->mtu,
4063 SCTP_CWND_LOG_NO_CUMACK);
4068 * NOW, according to Karn's rule do we need to restore the
4069 * RTO timer back? Check our net_ack2. If not set then we
4070 * have an ambiguity, i.e. all data ack'd was sent to more than one place.
4073 if (net->net_ack2) {
4074 /* restore any doubled timers */
4075 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4076 if (net->RTO < stcb->asoc.minrto) {
4077 net->RTO = stcb->asoc.minrto;
4079 if (net->RTO > stcb->asoc.maxrto) {
4080 net->RTO = stcb->asoc.maxrto;
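/*
 * Express SACK path: used for the common case where the SACK only
 * moves the cumulative ack (no gap-ack blocks, no duplicate TSN
 * reports).  It frees everything up to cumack, updates cwnd/rwnd
 * and the ECN nonce state, and handles shutdown, without the full
 * gap-processing done in sctp_handle_sack().
 */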
4088 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4089 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4091 struct sctp_nets *net;
4092 struct sctp_association *asoc;
4093 struct sctp_tmit_chunk *tp1, *tp2;
4096 SCTP_TCB_LOCK_ASSERT(stcb);
4098 /* First setup for CC stuff */
4099 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4100 net->prev_cwnd = net->cwnd;
4104 if (sctp_strict_sacks) {
4107 if (TAILQ_EMPTY(&asoc->send_queue)) {
4108 send_s = asoc->sending_seq;
4110 tp1 = TAILQ_FIRST(&asoc->send_queue);
4111 send_s = tp1->rec.data.TSN_seq;
4113 if ((cumack == send_s) ||
4114 compare_with_wrap(cumack, send_s, MAX_TSN)) {
4115 #ifdef INVARIANTS /* for testing only */
4116 panic("Impossible sack 1");
4122 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4123 0, M_DONTWAIT, 1, MT_DATA);
4125 struct sctp_paramhdr *ph;
4128 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4130 ph = mtod(oper, struct sctp_paramhdr *);
4131 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4132 ph->param_length = htons(SCTP_BUF_LEN(oper));
4133 ippp = (uint32_t *) (ph + 1);
4134 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4136 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4137 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
4142 asoc->this_sack_highest_gap = cumack;
4143 stcb->asoc.overall_error_count = 0;
4144 /* process the new consecutive TSN first */
4145 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4147 tp2 = TAILQ_NEXT(tp1, sctp_next);
4148 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4150 cumack == tp1->rec.data.TSN_seq) {
4151 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4153 * ECN Nonce: Add the nonce to the sender's nonce sum.
4156 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4157 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4159 * If it is less than ACKED, it is
4160 * now no longer in flight. Higher
4161 * values may occur during marking
4163 #ifdef SCTP_FLIGHT_LOGGING
4164 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
4165 tp1->whoTo->flight_size,
4168 tp1->rec.data.TSN_seq);
4171 if (tp1->whoTo->flight_size >= tp1->book_size) {
4172 tp1->whoTo->flight_size -= tp1->book_size;
4174 tp1->whoTo->flight_size = 0;
4176 if (asoc->total_flight >= tp1->book_size) {
4177 asoc->total_flight -= tp1->book_size;
4178 if (asoc->total_flight_count > 0)
4179 asoc->total_flight_count--;
4181 asoc->total_flight = 0;
4182 asoc->total_flight_count = 0;
4184 tp1->whoTo->net_ack += tp1->send_size;
4185 if (tp1->snd_count < 2) {
4187 * True non-retransmitted chunk
4190 tp1->whoTo->net_ack2 +=
4193 /* update RTO too? */
4194 if ((tp1->do_rtt) && (tp1->whoTo->rto_pending)) {
4196 sctp_calculate_rto(stcb,
4198 &tp1->sent_rcv_time);
4199 tp1->whoTo->rto_pending = 0;
4203 #ifdef SCTP_CWND_LOGGING
4204 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4207 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4208 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4210 tp1->sent = SCTP_DATAGRAM_ACKED;
4215 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4217 sctp_free_bufspace(stcb, asoc, tp1, 1);
4218 sctp_m_freem(tp1->data);
4220 #ifdef SCTP_SACK_LOGGING
4221 sctp_log_sack(asoc->last_acked_seq,
4223 tp1->rec.data.TSN_seq,
4226 SCTP_LOG_FREE_SENT);
4229 asoc->sent_queue_cnt--;
4230 sctp_free_remote_addr(tp1->whoTo);
4231 sctp_free_a_chunk(stcb, tp1);
4234 if (stcb->sctp_socket) {
4235 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4236 #ifdef SCTP_WAKE_LOGGING
4237 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4239 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4240 #ifdef SCTP_WAKE_LOGGING
4242 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4246 if (asoc->last_acked_seq != cumack)
4247 sctp_cwnd_update(stcb, asoc, 1, 0, 0);
4248 asoc->last_acked_seq = cumack;
4249 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4250 /* nothing left in-flight */
4251 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4252 net->flight_size = 0;
4253 net->partial_bytes_acked = 0;
4255 asoc->total_flight = 0;
4256 asoc->total_flight_count = 0;
4258 /* Fix up the a-p-a-p (advanced peer ack point) for future PR-SCTP sends */
4259 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4260 asoc->advanced_peer_ack_point = cumack;
4262 /* ECN Nonce updates */
4263 if (asoc->ecn_nonce_allowed) {
4264 if (asoc->nonce_sum_check) {
4265 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4266 if (asoc->nonce_wait_for_ecne == 0) {
4267 struct sctp_tmit_chunk *lchk;
4269 lchk = TAILQ_FIRST(&asoc->send_queue);
4270 asoc->nonce_wait_for_ecne = 1;
4272 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4274 asoc->nonce_wait_tsn = asoc->sending_seq;
4277 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4278 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4280 * Misbehaving peer. We need
4281 * to react to this guy
4283 asoc->ecn_allowed = 0;
4284 asoc->ecn_nonce_allowed = 0;
4289 /* See if Resynchronization Possible */
4290 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4291 asoc->nonce_sum_check = 1;
4293 * now we must calculate what the base is.
4294 * We do this based on two things: we know
4295 * the totals for all the segments
4296 * gap-acked in the SACK (none), and we also
4297 * know the SACK's nonce sum, it's in
4298 * nonce_sum_flag. So we can build a truth
4299 * table to back-calculate the new value of
4300 * asoc->nonce_sum_expect_base:
4302 * SACK-flag-Value  Seg-Sums  Base
 *        0            0        0
 *        1            0        1
 *        0            1        1
 *        1            1        0
4306 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4311 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4312 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4313 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4314 /* SWS sender side engages */
4315 asoc->peers_rwnd = 0;
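/*
 * Hypothetical numbers: if the peer advertises an rwnd of 64000
 * while total_flight is 30000 and 20 chunks sit on the sent queue
 * with sctp_peer_chunk_oh at 256, peers_rwnd becomes
 * 64000 - (30000 + 20 * 256) = 28880.  Values under the SWS
 * threshold are clamped to 0 so we do not dribble out tiny
 * chunks (silly window avoidance on the sender side).
 */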
4317 /* Now assure a timer is running where data is queued */
4320 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4321 if (net->flight_size) {
4324 if (net->RTO == 0) {
4325 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4327 to_ticks = MSEC_TO_TICKS(net->RTO);
4330 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4331 sctp_timeout_handler, &net->rxt_timer);
4333 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4334 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4336 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4338 if (sctp_early_fr) {
4339 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4340 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4341 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4342 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4347 if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) {
4348 /* huh, this should not happen */
4350 panic("Flight size incorrect? fixing??");
4352 printf("Flight size incorrect? fixing\n");
4353 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4354 net->flight_size = 0;
4356 asoc->total_flight = 0;
4357 asoc->total_flight_count = 0;
4358 asoc->sent_queue_retran_cnt = 0;
4359 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4360 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4361 tp1->whoTo->flight_size += tp1->book_size;
4362 asoc->total_flight += tp1->book_size;
4363 asoc->total_flight_count++;
4364 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4365 asoc->sent_queue_retran_cnt++;
4371 /**********************************/
4372 /* Now what about shutdown issues */
4373 /**********************************/
4374 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4375 /* nothing left on sendqueue.. consider done */
4377 if ((asoc->stream_queue_cnt == 1) &&
4378 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4379 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4380 (asoc->locked_on_sending)
4382 struct sctp_stream_queue_pending *sp;
4385 * I may be in a state where we got all across.. but
4386 * cannot write more due to a shutdown... we abort
4387 * since the user did not indicate EOR in this case.
4388 * The sp will be cleaned during free of the asoc.
4390 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4392 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
4393 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4394 asoc->locked_on_sending = NULL;
4395 asoc->stream_queue_cnt--;
4398 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4399 (asoc->stream_queue_cnt == 0)) {
4400 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4401 /* Need to abort here */
4407 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4408 0, M_DONTWAIT, 1, MT_DATA);
4410 struct sctp_paramhdr *ph;
4413 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4415 ph = mtod(oper, struct sctp_paramhdr *);
4416 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4417 ph->param_length = htons(SCTP_BUF_LEN(oper));
4418 ippp = (uint32_t *) (ph + 1);
4419 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4421 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4422 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
4424 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4425 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4426 sctp_stop_timers_for_shutdown(stcb);
4427 sctp_send_shutdown(stcb,
4428 stcb->asoc.primary_destination);
4429 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4430 stcb->sctp_ep, stcb, asoc->primary_destination);
4431 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4432 stcb->sctp_ep, stcb, asoc->primary_destination);
4434 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4435 (asoc->stream_queue_cnt == 0)) {
4436 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4439 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4440 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4441 sctp_send_shutdown_ack(stcb,
4442 stcb->asoc.primary_destination);
4444 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4445 stcb->sctp_ep, stcb, asoc->primary_destination);
4448 #ifdef SCTP_SACK_RWND_LOGGING
4449 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4451 stcb->asoc.peers_rwnd,
4452 stcb->asoc.total_flight,
4453 stcb->asoc.total_output_queue_size);
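/*
 * sctp_handle_sack() is the slow-path handler for an incoming SACK on
 * data we have been sending. It validates the cumulative ack, credits
 * newly acked chunks, processes any gap-ack blocks, checks for revoked
 * data, updates cwnd and the peer's rwnd, and drives shutdown handling
 * once everything is acked.
 */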
4461 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4462 struct sctp_nets *net_from, int *abort_now)
4464 struct sctp_association *asoc;
4465 struct sctp_sack *sack;
4466 struct sctp_tmit_chunk *tp1, *tp2;
4467 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4468 this_sack_lowest_newack;
4469 uint16_t num_seg, num_dup;
4470 uint16_t wake_him = 0;
4471 unsigned int sack_length;
4474 int accum_moved = 0;
4475 int will_exit_fast_recovery = 0;
4477 struct sctp_nets *net = NULL;
4478 int nonce_sum_flag, ecn_seg_sums = 0;
4479 uint8_t reneged_all = 0;
4480 uint8_t cmt_dac_flag;
	/*
	 * We take any chance we can to service our queues, since we cannot
	 * get awoken when the socket is read from. :<
	 */
	/*
	 * Now perform the actual SACK handling:
	 *
	 *  1) Verify that it is not an old SACK; if so, discard.
	 *  2) If there is nothing left in the send queue (cum-ack equals
	 *     the last acked TSN) then it is a duplicate too; update any
	 *     rwnd change, verify no timers are running, then return.
	 *  3) Process any newly consecutive data, i.e. the cum-ack moved;
	 *     process these first and note that it moved.
	 *  4) Process any SACK gap-ack blocks.
	 *  5) Drop anything acked from the queue.
	 *  6) Check for any revoked blocks and mark them.
	 *  7) Update the cwnd.
	 *  8) If nothing is left, sync up flight sizes and such, stop all
	 *     timers, and check for the shutdown_pending state. If so,
	 *     go ahead and send off the shutdown. If in shutdown-received,
	 *     send off the shutdown-ack, start that timer, and return.
	 *  9) Strike any non-acked chunks and do the FR procedure if
	 *     needed, being sure to set the FR flag.
	 * 10) Do PR-SCTP procedures.
	 * 11) Apply any FR penalties.
	 * 12) Assure we will SACK if in shutdown_recv state.
	 */
4503 SCTP_TCB_LOCK_ASSERT(stcb);
4506 this_sack_lowest_newack = 0;
4508 sack_length = ntohs(ch->ch.chunk_length);
4509 if (sack_length < sizeof(struct sctp_sack_chunk)) {
4511 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Bad size on sack chunk .. too small\n");
4518 SCTP_STAT_INCR(sctps_slowpath_sack);
4519 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
	sack = &ch->sack;
	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4521 num_seg = ntohs(sack->num_gap_ack_blks);
4522 a_rwnd = (uint32_t) ntohl(sack->a_rwnd);
4525 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4526 num_dup = ntohs(sack->num_dup_tsns);
4529 stcb->asoc.overall_error_count = 0;
4531 #ifdef SCTP_SACK_LOGGING
4532 sctp_log_sack(asoc->last_acked_seq,
4539 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
4541 int off_to_dup, iii;
4544 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4545 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4546 dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup);
			for (iii = 0; iii < num_dup; iii++) {
				sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
				dupdata++;
			}
4553 printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4554 off_to_dup, num_dup, sack_length, num_seg);
4559 if (TAILQ_EMPTY(&asoc->send_queue)) {
		send_s = asoc->sending_seq;
	} else {
		tp1 = TAILQ_FIRST(&asoc->send_queue);
		send_s = tp1->rec.data.TSN_seq;
	}
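	/*
	 * Note on compare_with_wrap(a, b, MAX_TSN): it implements 32-bit
	 * serial-number arithmetic, i.e. it is true when TSN a is "newer"
	 * than b even across a wrap of the TSN space. For example (values
	 * purely illustrative), compare_with_wrap(5, 0xfffffffe, MAX_TSN)
	 * is true because 5 lies just past the wrap, while
	 * compare_with_wrap(5, 10, MAX_TSN) is false.
	 */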
4566 if (sctp_strict_sacks) {
4567 if (cum_ack == send_s ||
4568 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4569 #ifdef INVARIANTS /* for testing only */
4571 panic("Impossible sack 1");
4576 * no way, we have not even sent this TSN out yet.
4577 * Peer is hopelessly messed up with us.
4582 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4583 0, M_DONTWAIT, 1, MT_DATA);
4585 struct sctp_paramhdr *ph;
4588 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4590 ph = mtod(oper, struct sctp_paramhdr *);
4591 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4592 ph->param_length = htons(SCTP_BUF_LEN(oper));
4593 ippp = (uint32_t *) (ph + 1);
4594 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4596 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4597 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
4602 /**********************/
4603 /* 1) check the range */
4604 /**********************/
4605 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4606 /* acking something behind */
4609 /* update the Rwnd of the peer */
4610 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4611 TAILQ_EMPTY(&asoc->send_queue) &&
	    (asoc->stream_queue_cnt == 0)) {
4614 /* nothing left on send/sent and strmq */
4615 #ifdef SCTP_LOG_RWND
4616 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4617 asoc->peers_rwnd, 0, 0, a_rwnd);
4619 asoc->peers_rwnd = a_rwnd;
4620 if (asoc->sent_queue_retran_cnt) {
4621 asoc->sent_queue_retran_cnt = 0;
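		/*
		 * Sender-side silly window syndrome (SWS) avoidance: if the
		 * peer's advertised window falls below our configured
		 * threshold, treat it as zero so we do not dribble out tiny
		 * chunks.
		 */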
4623 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4624 /* SWS sender side engages */
4625 asoc->peers_rwnd = 0;
4627 /* stop any timers */
4628 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4629 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4630 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4631 if (sctp_early_fr) {
4632 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4633 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4634 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4635 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4638 net->partial_bytes_acked = 0;
4639 net->flight_size = 0;
4641 asoc->total_flight = 0;
4642 asoc->total_flight_count = 0;
	/*
	 * We init netAckSz and netAckSz2 to 0. These are used to track two
	 * things: netAckSz tracks the total byte count acked, and netAck2
	 * tracks the total bytes acked that are unambiguous (they were
	 * never retransmitted). We track these on a per destination
	 * address basis.
	 */
4652 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4653 net->prev_cwnd = net->cwnd;
4658 * CMT: Reset CUC algo variable before SACK processing
4660 net->new_pseudo_cumack = 0;
4661 net->will_exit_fast_recovery = 0;
4663 /* process the new consecutive TSN first */
4664 tp1 = TAILQ_FIRST(&asoc->sent_queue);
		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
		    MAX_TSN) ||
		    last_tsn == tp1->rec.data.TSN_seq) {
4669 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
			/*
			 * ECN Nonce: Add the nonce to the sender's
			 * nonce sum.
			 */
4674 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4676 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
				/*
				 * If it is less than ACKED, it is
				 * now no longer in flight. Higher
				 * values may occur during marking.
				 */
4682 if ((tp1->whoTo->dest_state &
4683 SCTP_ADDR_UNCONFIRMED) &&
4684 (tp1->snd_count < 2)) {
					/*
					 * If there was no retransmission
					 * and the address is unconfirmed
					 * and we sent to it and it is now
					 * sacked, it is confirmed; mark
					 * it so.
					 */
4693 tp1->whoTo->dest_state &=
4694 ~SCTP_ADDR_UNCONFIRMED;
4696 #ifdef SCTP_FLIGHT_LOGGING
4697 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
4698 tp1->whoTo->flight_size,
4701 tp1->rec.data.TSN_seq);
4703 if (tp1->whoTo->flight_size >= tp1->book_size) {
4704 tp1->whoTo->flight_size -= tp1->book_size;
4706 tp1->whoTo->flight_size = 0;
4708 if (asoc->total_flight >= tp1->book_size) {
4709 asoc->total_flight -= tp1->book_size;
4710 if (asoc->total_flight_count > 0)
4711 asoc->total_flight_count--;
4713 asoc->total_flight = 0;
4714 asoc->total_flight_count = 0;
4716 tp1->whoTo->net_ack += tp1->send_size;
4718 /* CMT SFR and DAC algos */
4719 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4720 tp1->whoTo->saw_newack = 1;
4722 if (tp1->snd_count < 2) {
				/*
				 * True non-retransmitted chunk.
				 */
				tp1->whoTo->net_ack2 +=
				    tp1->send_size;
4730 /* update RTO too? */
				tp1->whoTo->RTO =
				    sctp_calculate_rto(stcb,
				    asoc, tp1->whoTo,
				    &tp1->sent_rcv_time);
4736 tp1->whoTo->rto_pending = 0;
			/*
			 * CMT: CUCv2 algorithm. From the
			 * cumack'd TSNs, for each TSN being
			 * acked for the first time, set the
			 * following variables for the
			 * corresponding destination:
			 * new_pseudo_cumack will trigger a
			 * cwnd update;
			 * find_(rtx_)pseudo_cumack will
			 * trigger a search for the next
			 * expected (rtx-)pseudo-cumack.
			 */
4752 tp1->whoTo->new_pseudo_cumack = 1;
4753 tp1->whoTo->find_pseudo_cumack = 1;
4754 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4757 #ifdef SCTP_SACK_LOGGING
4758 sctp_log_sack(asoc->last_acked_seq,
4760 tp1->rec.data.TSN_seq,
4763 SCTP_LOG_TSN_ACKED);
4765 #ifdef SCTP_CWND_LOGGING
4766 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4769 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4770 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4771 #ifdef SCTP_AUDITING_ENABLED
4772 sctp_audit_log(0xB3,
4773 (asoc->sent_queue_retran_cnt & 0x000000ff));
4776 tp1->sent = SCTP_DATAGRAM_ACKED;
4781 tp1 = TAILQ_NEXT(tp1, sctp_next);
4783 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4784 /* always set this up to cum-ack */
4785 asoc->this_sack_highest_gap = last_tsn;
4787 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
4789 /* skip corrupt segments */
	/*
	 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
	 * to be greater than the cumack. Also reset saw_newack to 0
	 * for all destinations.
	 */
4799 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4800 net->saw_newack = 0;
4801 net->this_sack_highest_newack = last_tsn;
	/*
	 * thisSackHighestGap will increase while handling NEW segments;
	 * this_sack_highest_newack will increase while handling NEWLY
	 * ACKED chunks; this_sack_lowest_newack is used for the CMT DAC
	 * algorithm. saw_newack will also change.
	 */
4810 sctp_handle_segments(stcb, asoc, ch, last_tsn,
4811 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4812 num_seg, &ecn_seg_sums);
4814 if (sctp_strict_sacks) {
4816 * validate the biggest_tsn_acked in the gap acks if
4817 * strict adherence is wanted.
4819 if ((biggest_tsn_acked == send_s) ||
4820 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4822 * peer is either confused or we are under
4823 * attack. We must abort.
4830 /*******************************************/
4831 /* cancel ALL T3-send timer if accum moved */
4832 /*******************************************/
4833 if (sctp_cmt_on_off) {
4834 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4835 if (net->new_pseudo_cumack)
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4843 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4844 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4845 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4849 /********************************************/
4850 /* drop the acked chunks from the sendqueue */
4851 /********************************************/
4852 asoc->last_acked_seq = cum_ack;
4854 tp1 = TAILQ_FIRST(&asoc->sent_queue);
	do {
		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
		    MAX_TSN)) {
			break;
		}
		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* no more sent on list */
			break;
		}
		tp2 = TAILQ_NEXT(tp1, sctp_next);
4867 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
		/*
		 * Friendlier printf in lieu of panic, now that I think this
		 * is fixed.
		 */
4873 if (tp1->pr_sctp_on) {
4874 if (asoc->pr_sctp_cnt != 0)
4875 asoc->pr_sctp_cnt--;
4877 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4878 (asoc->total_flight > 0)) {
			printf("Warning: flight size should be 0 but is %d\n",
			    asoc->total_flight);
4881 asoc->total_flight = 0;
4884 sctp_free_bufspace(stcb, asoc, tp1, 1);
4885 sctp_m_freem(tp1->data);
4886 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4887 asoc->sent_queue_cnt_removeable--;
4890 #ifdef SCTP_SACK_LOGGING
4891 sctp_log_sack(asoc->last_acked_seq,
4893 tp1->rec.data.TSN_seq,
4896 SCTP_LOG_FREE_SENT);
4899 asoc->sent_queue_cnt--;
4900 sctp_free_remote_addr(tp1->whoTo);
4902 sctp_free_a_chunk(stcb, tp1);
4905 } while (tp1 != NULL);
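	/*
	 * Dropping acked chunks freed send-buffer space; if anything was
	 * freed, wake any writer blocked on so_snd.
	 */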
4908 if ((wake_him) && (stcb->sctp_socket)) {
4909 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4910 #ifdef SCTP_WAKE_LOGGING
4911 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4913 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4914 #ifdef SCTP_WAKE_LOGGING
4916 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4920 if ((sctp_cmt_on_off == 0) && asoc->fast_retran_loss_recovery && accum_moved) {
4921 if (compare_with_wrap(asoc->last_acked_seq,
4922 asoc->fast_recovery_tsn, MAX_TSN) ||
4923 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4924 /* Setup so we will exit RFC2582 fast recovery */
4925 will_exit_fast_recovery = 1;
	/*
	 * Check for revoked fragments:
	 *
	 * - If the previous SACK had no gap-ack blocks, nothing can have
	 *   been revoked.
	 * - If the previous SACK had gap-ack blocks, then:
	 *   - If this SACK also has gap-ack blocks (num_seg > 0), call
	 *     sctp_check_for_revoked() to tell whether the peer revoked
	 *     some of them.
	 *   - Otherwise the peer revoked all previously ACKED fragments,
	 *     since we had some before and now we have NONE.
	 */
4938 if (sctp_cmt_on_off) {
4940 * Don't check for revoked if CMT is ON. CMT causes
4941 * reordering of data and acks (received on different
4942 * interfaces) can be persistently reordered. Acking
4943 * followed by apparent revoking and re-acking causes
4944 * unexpected weird behavior. So, at this time, CMT does not
4945 * respect renegs. Renegs will have to be recovered through
4946 * a timeout. Not a big deal for such a rare event.
		 */
	} else if (num_seg)
		sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
4950 else if (asoc->saw_sack_with_frags) {
4951 int cnt_revoked = 0;
4953 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4955 /* Peer revoked all dg's marked or acked */
4956 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4957 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
4958 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
4959 tp1->sent = SCTP_DATAGRAM_SENT;
4960 tp1->rec.data.chunk_was_revoked = 1;
4961 tp1->whoTo->flight_size += tp1->book_size;
4962 asoc->total_flight_count++;
4963 asoc->total_flight += tp1->book_size;
4971 asoc->saw_sack_with_frags = 0;
	}
	if (num_seg)
		asoc->saw_sack_with_frags = 1;
	else
		asoc->saw_sack_with_frags = 0;
4979 sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4981 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4982 /* nothing left in-flight */
4983 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4984 /* stop all timers */
4985 if (sctp_early_fr) {
4986 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4987 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4988 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4989 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4992 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4993 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4994 net->flight_size = 0;
4995 net->partial_bytes_acked = 0;
4997 asoc->total_flight = 0;
4998 asoc->total_flight_count = 0;
5000 /**********************************/
5001 /* Now what about shutdown issues */
5002 /**********************************/
5003 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5004 /* nothing left on sendqueue.. consider done */
5005 #ifdef SCTP_LOG_RWND
5006 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5007 asoc->peers_rwnd, 0, 0, a_rwnd);
5009 asoc->peers_rwnd = a_rwnd;
5010 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5011 /* SWS sender side engages */
5012 asoc->peers_rwnd = 0;
5015 if ((asoc->stream_queue_cnt == 1) &&
5016 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5017 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
	    (asoc->locked_on_sending)) {
5020 struct sctp_stream_queue_pending *sp;
			/*
			 * We may be in a state where everything we sent got
			 * across, but we cannot write more due to a shutdown
			 * in progress. We abort, since the user did not
			 * indicate EOR in this case.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
			    sctp_streamhead);
5029 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
5030 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5031 asoc->locked_on_sending = NULL;
5032 asoc->stream_queue_cnt--;
5035 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5036 (asoc->stream_queue_cnt == 0)) {
5037 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5038 /* Need to abort here */
5044 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5045 0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
					    sizeof(uint32_t);
5052 ph = mtod(oper, struct sctp_paramhdr *);
5053 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5054 ph->param_length = htons(SCTP_BUF_LEN(oper));
5055 ippp = (uint32_t *) (ph + 1);
5056 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5058 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5059 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
			} else {
				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
5063 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5064 sctp_stop_timers_for_shutdown(stcb);
5065 sctp_send_shutdown(stcb,
5066 stcb->asoc.primary_destination);
5067 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5068 stcb->sctp_ep, stcb, asoc->primary_destination);
5069 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5070 stcb->sctp_ep, stcb, asoc->primary_destination);
5073 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5074 (asoc->stream_queue_cnt == 0)) {
5075 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5078 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
5079 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5080 sctp_send_shutdown_ack(stcb,
5081 stcb->asoc.primary_destination);
5083 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5084 stcb->sctp_ep, stcb, asoc->primary_destination);
5089 * Now here we are going to recycle net_ack for a different use...
5092 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5097 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5098 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5099 * automatically ensure that.
5101 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) {
5102 this_sack_lowest_newack = cum_ack;
5105 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5106 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5108 /*********************************************/
5109 /* Here we perform PR-SCTP procedures */
5111 /*********************************************/
5112 /* C1. update advancedPeerAckPoint */
5113 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5114 asoc->advanced_peer_ack_point = cum_ack;
5116 /* C2. try to further move advancedPeerAckPoint ahead */
5118 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5119 struct sctp_tmit_chunk *lchk;
5121 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5122 /* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
		    MAX_TSN)) {
5126 * ISSUE with ECN, see FWD-TSN processing for notes
5127 * on issues that will occur when the ECN NONCE
5128 * stuff is put into SCTP for cross checking.
5130 send_forward_tsn(stcb, asoc);
5133 * ECN Nonce: Disable Nonce Sum check when FWD TSN
5134 * is sent and store resync tsn
5136 asoc->nonce_sum_check = 0;
5137 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5139 /* Assure a timer is up */
5140 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5141 stcb->sctp_ep, stcb, lchk->whoTo);
	/*
	 * CMT fast recovery code. Needs debugging. (A leftover candidate
	 * per-destination condition: ((sctp_cmt_on_off == 1) &&
	 * (net->fast_retran_loss_recovery == 0)).)
	 */
5149 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5150 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
5151 /* out of a RFC2582 Fast recovery window? */
5152 if (net->net_ack > 0) {
				/*
				 * Per section 7.2.3: were there any
				 * destinations that had a fast
				 * retransmit sent to them? If so, we
				 * need to adjust their ssthresh and
				 * cwnd.
				 */
5159 struct sctp_tmit_chunk *lchk;
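				/*
				 * Standard multiplicative decrease on a
				 * fast retransmit: halve cwnd into
				 * ssthresh, floor ssthresh at 2 * MTU,
				 * and restart cwnd from ssthresh.
				 */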
5161 #ifdef SCTP_HIGH_SPEED
				sctp_hs_cwnd_decrease(stcb, net);
#else
#ifdef SCTP_CWND_MONITOR
				int old_cwnd = net->cwnd;

#endif
5168 net->ssthresh = net->cwnd / 2;
5169 if (net->ssthresh < (net->mtu * 2)) {
5170 net->ssthresh = 2 * net->mtu;
5172 net->cwnd = net->ssthresh;
5173 #ifdef SCTP_CWND_MONITOR
5174 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
5175 SCTP_CWND_LOG_FROM_FR);
5179 lchk = TAILQ_FIRST(&asoc->send_queue);
5181 net->partial_bytes_acked = 0;
5182 /* Turn on fast recovery window */
5183 asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}
5192 * CMT fast recovery -- per destination
5193 * recovery variable.
5195 net->fast_retran_loss_recovery = 1;
					if (lchk == NULL) {
						/* Mark end of the window */
						net->fast_recovery_tsn = asoc->sending_seq - 1;
					} else {
						net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
					}
				/*
				 * Disable Nonce Sum Checking and store
				 * the resync TSN.
				 */
5210 asoc->nonce_sum_check = 0;
5211 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
5213 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
5214 stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5215 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5216 stcb->sctp_ep, stcb, net);
5218 } else if (net->net_ack > 0) {
5220 * Mark a peg that we WOULD have done a cwnd
5221 * reduction but RFC2582 prevented this action.
5223 SCTP_STAT_INCR(sctps_fastretransinrtt);
5228 /******************************************************************
 * Here we handle the ECN Nonce checking. We basically check whether
 * the nonce-sum flag was incorrect or whether resynchronization needs
 * to be done. Also, if we catch a misbehaving receiver, we give it
 * the kick.
5233 ******************************************************************/
5235 if (asoc->ecn_nonce_allowed) {
5236 if (asoc->nonce_sum_check) {
5237 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5238 if (asoc->nonce_wait_for_ecne == 0) {
5239 struct sctp_tmit_chunk *lchk;
5241 lchk = TAILQ_FIRST(&asoc->send_queue);
5242 asoc->nonce_wait_for_ecne = 1;
				if (lchk) {
					asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
				} else {
					asoc->nonce_wait_tsn = asoc->sending_seq;
				}
5249 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5250 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5252 * Misbehaving peer. We need
5253 * to react to this guy
5255 asoc->ecn_allowed = 0;
5256 asoc->ecn_nonce_allowed = 0;
5261 /* See if Resynchronization Possible */
5262 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5263 asoc->nonce_sum_check = 1;
				/*
				 * Now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments
				 * gap-acked in the SACK (stored in
				 * ecn_seg_sums), and we know the SACK's
				 * nonce sum (in nonce_sum_flag). So we
				 * can build a truth table to back-calculate
				 * the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value   Seg-Sums   Base
				 *        0             0        0
				 *        1             0        1
				 *        0             1        1
				 *        1             1        0
				 */
5279 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
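				/*
				 * e.g. if this SACK carries nonce-sum
				 * flag 1 and the gap-acked segments sum
				 * (mod 2) to 0, the new expected base is
				 * 0 ^ 1 = 1, per the XOR above.
				 */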
5283 /* Now are we exiting loss recovery ? */
5284 if (will_exit_fast_recovery) {
5285 /* Ok, we must exit fast recovery */
5286 asoc->fast_retran_loss_recovery = 0;
5288 if ((asoc->sat_t3_loss_recovery) &&
	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
	    MAX_TSN)) ||
5291 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5292 /* end satellite t3 loss recovery */
5293 asoc->sat_t3_loss_recovery = 0;
5295 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5296 if (net->will_exit_fast_recovery) {
5297 /* Ok, we must exit fast recovery */
5298 net->fast_retran_loss_recovery = 0;
5302 /* Adjust and set the new rwnd value */
5303 #ifdef SCTP_LOG_RWND
5304 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5305 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
5308 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5309 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
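	/*
	 * peers_rwnd is now the advertised a_rwnd minus everything we still
	 * count as in flight, charging sctp_peer_chunk_oh bytes of assumed
	 * per-chunk overhead for each chunk left on the sent queue
	 * (sctp_sbspace_sub clamps the result at zero).
	 */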
5310 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5311 /* SWS sender side engages */
5312 asoc->peers_rwnd = 0;
	/*
	 * Now we must set things up so we have a timer up for anyone with
	 * outstanding data.
	 */
	j = 0;
5320 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (net->flight_size) {
			j++;
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			    stcb->sctp_ep, stcb, net);
		}
5327 if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) {
		/* huh, this should not happen */
#ifdef INVARIANTS
		panic("Flight size incorrect? fixing??");
#else
		printf("Flight size incorrect? fixing??\n");
#endif
5333 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5334 net->flight_size = 0;
5336 asoc->total_flight = 0;
5337 asoc->total_flight_count = 0;
5338 asoc->sent_queue_retran_cnt = 0;
5339 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5340 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5341 tp1->whoTo->flight_size += tp1->book_size;
5342 asoc->total_flight += tp1->book_size;
5343 asoc->total_flight_count++;
5344 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5345 asoc->sent_queue_retran_cnt++;
5351 #ifdef SCTP_SACK_RWND_LOGGING
5352 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5354 stcb->asoc.peers_rwnd,
5355 stcb->asoc.total_flight,
5356 stcb->asoc.total_output_queue_size);
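/*
 * sctp_update_acked() handles the cumulative-ack field of an incoming
 * SHUTDOWN chunk: it treats the cum-ack like a SACK with no gap blocks,
 * holds a_rwnd constant, and hands it to the express SACK path.
 */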
5363 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5364 struct sctp_nets *netp, int *abort_flag)
5367 uint32_t cum_ack, a_rwnd;
5369 cum_ack = ntohl(cp->cumulative_tsn_ack);
5370 /* Arrange so a_rwnd does NOT change */
5371 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5373 /* Now call the express sack handling */
5374 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
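/*
 * sctp_kick_prsctp_reorder_queue() re-drives a stream's reorder queue
 * after a FWD-TSN moved last_sequence_delivered: first everything at or
 * before the new stream sequence is pushed to the read queue, then any
 * messages that are now in order behind it.
 */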
5378 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5379 struct sctp_stream_in *strmin)
5381 struct sctp_queued_to_read *ctl, *nctl;
5382 struct sctp_association *asoc;
	uint16_t tt;

	asoc = &stcb->asoc;
	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream
	 * sequence number that came in with the FWD-TSN.
	 */
5391 ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
5394 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5395 (tt == ctl->sinfo_ssn)) {
5396 /* this is deliverable now */
5397 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5398 /* subtract pending on streams */
5399 asoc->size_on_all_streams -= ctl->length;
5400 sctp_ucount_decr(asoc->cnt_on_all_streams);
5401 /* deliver it to at least the delivery-q */
5402 if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1);
5408 /* no more delivery now. */
	/*
	 * Now we must deliver things in the queue the normal way, if any
	 * are left.
	 */
5417 tt = strmin->last_sequence_delivered + 1;
5418 ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
5421 if (tt == ctl->sinfo_ssn) {
5422 /* this is deliverable now */
5423 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5424 /* subtract pending on streams */
5425 asoc->size_on_all_streams -= ctl->length;
5426 sctp_ucount_decr(asoc->cnt_on_all_streams);
5427 /* deliver it to at least the delivery-q */
5428 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5429 if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1);
5434 tt = strmin->last_sequence_delivered + 1;
5443 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5444 struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
	/*
	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
	 * forward TSN, when the SACK comes back that acknowledges the
	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
	 * get quite tricky since we may have sent more data intervening
	 * and must carefully account for what the SACK says on the nonce
	 * and any gaps that are reported. This work will NOT be done here,
	 * but I note it here since it is really related to PR-SCTP and
	 * FWD-TSN.
	 */
5457 /* The pr-sctp fwd tsn */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the pr-sctp draft.
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a SACK to report where we are.
	 */
5469 struct sctp_strseq *stseq;
5470 struct sctp_association *asoc;
5471 uint32_t new_cum_tsn, gap, back_out_htsn;
5472 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5473 struct sctp_stream_in *strm;
5474 struct sctp_tmit_chunk *chk, *at;
	cumack_set_flag = 0;
	asoc = &stcb->asoc;
5479 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5481 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Bad size on fwd-tsn chunk .. too small\n");
5487 m_size = (stcb->asoc.mapping_array_size << 3);
5488 /*************************************************************/
5489 /* 1. Here we update local cumTSN and shift the bitmap array */
5490 /*************************************************************/
5491 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5493 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5494 asoc->cumulative_tsn == new_cum_tsn) {
5495 /* Already got there ... */
5498 back_out_htsn = asoc->highest_tsn_inside_map;
5499 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5501 asoc->highest_tsn_inside_map = new_cum_tsn;
5502 #ifdef SCTP_MAP_LOGGING
5503 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	/*
	 * Now we know the new TSN is more advanced; let's find the actual
	 * gap from the mapping array base.
	 */
	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
	    MAX_TSN)) ||
	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
5513 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
5515 /* try to prevent underflow here */
5516 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
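	/*
	 * e.g. (hypothetical values) with mapping_array_base_tsn =
	 * 0xfffffffe and new_cum_tsn = 1, the wrap branch gives
	 * gap = 1 + (0xffffffff - 0xfffffffe) + 1 = 3, i.e. the new
	 * cum-TSN sits at bit 3 of the mapping array.
	 */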
	if (gap > m_size) {	/* gap is unsigned, so the old "gap < 0" test could never fire */
5520 asoc->highest_tsn_inside_map = back_out_htsn;
5521 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			/*
			 * Out of range (beyond the single-byte chunks
			 * covered by the rwnd we give out); too
			 * questionable, better to drop it silently.
			 */
5529 if (asoc->highest_tsn_inside_map >
5530 asoc->mapping_array_base_tsn) {
5531 gap = asoc->highest_tsn_inside_map -
5532 asoc->mapping_array_base_tsn;
5534 gap = asoc->highest_tsn_inside_map +
5535 (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5537 cumack_set_flag = 1;
5539 for (i = 0; i <= gap; i++) {
5540 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
	/*
	 * Now, after marking all, slide things forward but send no SACK,
	 * please.
	 */
5545 sctp_sack_check(stcb, 0, 0, abort_flag);
5549 if (cumack_set_flag) {
		/*
		 * The fwd-tsn went outside my gap array - not a common
		 * occurrence. Do the same resynchronization we do when a
		 * cookie-echo resets the mapping.
		 */
5555 asoc->highest_tsn_inside_map = new_cum_tsn - 1;
5556 asoc->mapping_array_base_tsn = new_cum_tsn;
5557 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
5558 #ifdef SCTP_MAP_LOGGING
5559 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5561 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5563 /*************************************************************/
5564 /* 2. Clear up re-assembly queue */
5565 /*************************************************************/
	/*
	 * First service it if the pd-api is up, just in case we can
	 * progress it forward.
	 */
5571 if (asoc->fragmented_delivery_inprogress) {
5572 sctp_service_reassembly(stcb, asoc);
5574 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5575 /* For each one on here see if we need to toss it */
		/*
		 * For now, large messages held on the reasmqueue that are
		 * complete will be tossed too. We could in theory do more
		 * work to spin through and stop after dumping one msg, aka
		 * seeing the start of a new msg at the head, and call the
		 * delivery function... to see if it can be delivered... But
		 * for now we just dump everything on the queue.
		 */
5584 chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			at = TAILQ_NEXT(chk, sctp_next);
5587 if (compare_with_wrap(asoc->cumulative_tsn,
5588 chk->rec.data.TSN_seq, MAX_TSN) ||
5589 asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
5590 /* It needs to be tossed */
5591 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5592 if (compare_with_wrap(chk->rec.data.TSN_seq,
5593 asoc->tsn_last_delivered, MAX_TSN)) {
5594 asoc->tsn_last_delivered =
5595 chk->rec.data.TSN_seq;
5596 asoc->str_of_pdapi =
5597 chk->rec.data.stream_number;
5598 asoc->ssn_of_pdapi =
5599 chk->rec.data.stream_seq;
5600 asoc->fragment_flags =
5601 chk->rec.data.rcv_flags;
5603 asoc->size_on_reasm_queue -= chk->send_size;
5604 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5607 /* Clear up any stream problem */
5608 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5609 SCTP_DATA_UNORDERED &&
				    (compare_with_wrap(chk->rec.data.stream_seq,
				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
				    MAX_SEQ))) {
					/*
					 * We must dump forward this
					 * stream's sequence number if the
					 * chunk being skipped is not
					 * unordered. There is a chance
					 * that if the peer does not
					 * include the last fragment in
					 * its FWD-TSN we WILL have a
					 * problem here, since you would
					 * have a partial chunk in the
					 * queue that may not be
					 * deliverable. Also, if a partial
					 * delivery API has started, the
					 * user may get a partial chunk
					 * with the next read returning a
					 * new chunk... really ugly, but I
					 * see no way around it! Maybe a
					 * notify??
					 */
5630 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5631 chk->rec.data.stream_seq;
5634 sctp_m_freem(chk->data);
5637 sctp_free_remote_addr(chk->whoTo);
5638 sctp_free_a_chunk(stcb, chk);
5641 * Ok we have gone beyond the end of the
5642 * fwd-tsn's mark. Some checks...
5644 if ((asoc->fragmented_delivery_inprogress) &&
5645 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
				/*
				 * Special case: the PD-API is up and
				 * what we fwd-tsn'd over includes one
				 * that had the LAST_FRAG. We no
				 * longer need to do the PD-API.
				 */
5652 asoc->fragmented_delivery_inprogress = 0;
5653 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5654 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
5662 if (asoc->fragmented_delivery_inprogress) {
5664 * Ok we removed cnt_gone chunks in the PD-API queue that
5665 * were being delivered. So now we must turn off the flag.
5667 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5668 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
5669 asoc->fragmented_delivery_inprogress = 0;
5671 /*************************************************************/
5672 /* 3. Update the PR-stream re-ordering queues */
5673 /*************************************************************/
5674 stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
5675 fwd_sz -= sizeof(*fwd);
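	/*
	 * What remains in the chunk is an array of (stream, sequence)
	 * pairs; each entry advances that stream's
	 * last_sequence_delivered and re-kicks its reorder queue.
	 */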
5680 num_str = fwd_sz / sizeof(struct sctp_strseq);
5681 for (i = 0; i < num_str; i++) {
		uint16_t st;

		/* Convert stream and sequence to host byte order in place. */
		st = ntohs(stseq[i].stream);
		stseq[i].stream = st;
		st = ntohs(stseq[i].sequence);
		stseq[i].sequence = st;
		/* valid streams are 0 .. streamincnt - 1, hence the >= */
		if (stseq[i].stream >= asoc->streamincnt) {
			/*
			 * It is arguable if we should continue.
			 * Since the peer sent bogus stream info, we
			 * may be in deep trouble; a return may be
			 * the better choice?
			 */
			break;
		}
5701 strm = &asoc->strmin[stseq[i].stream];
5702 if (compare_with_wrap(stseq[i].sequence,
5703 strm->last_sequence_delivered, MAX_SEQ)) {
5704 /* Update the sequence number */
			strm->last_sequence_delivered =
			    stseq[i].sequence;
5708 /* now kick the stream the new way */
5709 sctp_kick_prsctp_reorder_queue(stcb, strm);
5712 if (TAILQ_FIRST(&asoc->reasmqueue)) {
5713 /* now lets kick out and check for more fragmented delivery */
5714 sctp_deliver_reasm_check(stcb, &stcb->asoc);