/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the queue.
 */
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc, calc_w_oh;

    /*
     * This is really set wrong with respect to a 1-to-many socket, since
     * the sb_cc is the count that everyone has put up. When we rewrite
     * sctp_soreceive we will fix this so that ONLY this association's
     * data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return;
    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),

    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and what we
     * still hold for putting up
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
    /* what is the overhead of all these rwnd's */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);

    /*
     * If our overhead is greater than the advertised rwnd, we clamp
     * the rwnd to 1. This lets us still accept inbound segments, but
     * hopefully will shut the sender down when he finally gets the
     * message.
     */

        (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
        /* SWS engaged, tell peer none left */
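        /*
         * Illustrative note (added commentary, not original code): the
         * silly-window-syndrome guard above, in numbers. Suppose the
         * window left after subtracting overhead comes out to a few
         * hundred bytes while sctp_sws_receiver holds roughly one MTU
         * worth of space (values here are hypothetical). Advertising
         * that sliver would invite the peer to dribble tiny DATA chunks
         * at us, so we advertise a token 1-byte window instead and hold
         * it there until enough buffer drains for a usefully large
         * window to open again.
         */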
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0, calc_w_oh;

    /*
     * This is really set wrong with respect to a 1-to-many socket, since
     * the sb_cc is the count that everyone has put up. When we rewrite
     * sctp_soreceive we will fix this so that ONLY this association's
     * data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return (calc);

    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),

    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and what we
     * still hold for putting up
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

    /* what is the overhead of all these rwnd's */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    if (calc_w_oh == 0) {
        /*
         * If our overhead is greater than the advertised rwnd, we
         * clamp the rwnd to 1. This lets us still accept inbound
         * segments, but hopefully will shut the sender down when he
         * finally gets the message.
         */

        (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
        /* SWS engaged, tell peer none left */
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,

    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {

    read_queue_e->sinfo_stream = stream_no;
    read_queue_e->sinfo_ssn = stream_seq;
    read_queue_e->sinfo_flags = (flags << 8);
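    /*
     * Added commentary (not original code): the receiver-side chunk
     * flags are carried in the upper byte of sinfo_flags, hence the
     * << 8 above. For example, a DATA chunk with SCTP_DATA_UNORDERED
     * (0x04) would, under this convention, surface to the socket API
     * as 0x0400 in sinfo_flags.
     */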
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = net;
    read_queue_e->length = 0;
    atomic_add_int(&net->ref_count, 1);
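    /*
     * Added commentary (not original code): the readq entry now holds
     * its own reference on the remote net; it is dropped later via
     * sctp_free_remote_addr() when the entry is torn down.
     */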
    read_queue_e->data = dm;
    read_queue_e->spec_flags = 0;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->aux_data = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;

    return (read_queue_e);
/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {

    read_queue_e->sinfo_stream = chk->rec.data.stream_number;
    read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
    read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
    read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = chk->whoTo;
    read_queue_e->aux_data = NULL;
    read_queue_e->length = 0;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    read_queue_e->data = chk->data;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->spec_flags = 0;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;

    return (read_queue_e);
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_sndrcvinfo *outinfo;

    int use_extended = 0;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        /* user does not want the sndrcv ctl */

    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {

        len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));

        len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

    ret = sctp_get_mbuf_for_msg(len,
        0, M_DONTWAIT, 1, MT_DATA);

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    cmh->cmsg_level = IPPROTO_SCTP;

        cmh->cmsg_type = SCTP_EXTRCV;

    memcpy(outinfo, sinfo, len);

        cmh->cmsg_type = SCTP_SNDRCV;

    SCTP_BUF_LEN(ret) = cmh->cmsg_len;
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,

    struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_sndrcvinfo *outinfo;

    int use_extended = 0;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        /* user does not want the sndrcv ctl */

    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {

        len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));

        len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

    SCTP_MALLOC(buf, char *, len, "SCTP_CMSG");

    /* We need a CMSG header followed by the struct */
    cmh = (struct cmsghdr *)buf;
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    cmh->cmsg_level = IPPROTO_SCTP;

        cmh->cmsg_type = SCTP_EXTRCV;

    memcpy(outinfo, sinfo, len);

        cmh->cmsg_type = SCTP_SNDRCV;
/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;

    struct sctp_queued_to_read *control, *ctl, *ctlat;

    cntDel = stream_no = 0;
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
        /* socket above is long gone */
        asoc->fragmented_delivery_inprogress = 0;
        chk = TAILQ_FIRST(&asoc->reasmqueue);

            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
            asoc->size_on_reasm_queue -= chk->send_size;
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
            /*
             * Lose the data pointer, since it's in the socket
             * buffer
             */

                sctp_m_freem(chk->data);

            /* Now free the address and data */
            sctp_free_remote_addr(chk->whoTo);
            sctp_free_a_chunk(stcb, chk);
            /* sa_ignore FREED_MEMORY */
            chk = TAILQ_FIRST(&asoc->reasmqueue);

    SCTP_TCB_LOCK_ASSERT(stcb);

    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
        /* Can't deliver more :< */

    stream_no = chk->rec.data.stream_number;
    nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
    if (nxt_todel != chk->rec.data.stream_seq &&
        (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
        /*
         * Not the next sequence to deliver in its stream OR
         * unordered
         */

    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

        control = sctp_build_readq_entry_chk(stcb, chk);
        if (control == NULL) {

        /* save it off for our future deliveries */
        stcb->asoc.control_pdapi = control;
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)

        sctp_add_to_readq(stcb->sctp_ep,
            stcb, control, &stcb->sctp_socket->so_rcv, end);

        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
        if (sctp_append_to_readq(stcb->sctp_ep, stcb,
            stcb->asoc.control_pdapi,
            chk->data, end, chk->rec.data.TSN_seq,
            &stcb->sctp_socket->so_rcv)) {
            /*
             * something is very wrong, either
             * control_pdapi is NULL, or the tail_mbuf
             * is corrupt, or there is an EOM already on
             * the chain
             */
            if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
                panic("This should not happen control_pdapi NULL?");

            /* if we did not panic, it was an EOM */
            panic("Bad chunking ??");

    /* pull it, we did it */
    TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        asoc->fragmented_delivery_inprogress = 0;
        if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
            asoc->strmin[stream_no].last_sequence_delivered++;

        if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
            SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);

    } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /*
         * turn the flag back on since we just delivered
         */
        asoc->fragmented_delivery_inprogress = 1;

    asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
    asoc->last_flags_delivered = chk->rec.data.rcv_flags;
    asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
    asoc->last_strm_no_delivered = chk->rec.data.stream_number;

    asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
    asoc->size_on_reasm_queue -= chk->send_size;
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    /* free up the chk */

    sctp_free_remote_addr(chk->whoTo);
    sctp_free_a_chunk(stcb, chk);
    if (asoc->fragmented_delivery_inprogress == 0) {
        /*
         * Now let's see if we can deliver the next one on
         * the stream
         */
        struct sctp_stream_in *strm;

        strm = &asoc->strmin[stream_no];
        nxt_todel = strm->last_sequence_delivered + 1;
        ctl = TAILQ_FIRST(&strm->inqueue);
        if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
            while (ctl != NULL) {
                /* Deliver more if we can. */
                if (nxt_todel == ctl->sinfo_ssn) {
                    ctlat = TAILQ_NEXT(ctl, next);
                    TAILQ_REMOVE(&strm->inqueue, ctl, next);
                    asoc->size_on_all_streams -= ctl->length;
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    strm->last_sequence_delivered++;
                    sctp_add_to_readq(stcb->sctp_ep, stcb,
                        &stcb->sctp_socket->so_rcv, 1);

                    nxt_todel = strm->last_sequence_delivered + 1;

    /* sa_ignore FREED_MEMORY */
    chk = TAILQ_FIRST(&asoc->reasmqueue);
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4 billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and
     * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_stream_in *strm;
    struct sctp_queued_to_read *at;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    strm = &asoc->strmin[control->sinfo_stream];
    nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
    sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
    SCTPDBG(SCTP_DEBUG_INDATA1,
        "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
        (uint32_t) control->sinfo_stream,
        (uint32_t) strm->last_sequence_delivered,
        (uint32_t) nxt_todel);
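    /*
     * Added commentary (not original code): compare_with_wrap(a, b, M)
     * is this stack's serial-number comparison; it answers "is a newer
     * than b?" modulo the sequence space bounded by M (MAX_SEQ for
     * 16-bit SSNs, MAX_TSN for 32-bit TSNs). For example, with 16-bit
     * SSNs, 2 counts as newer than 65535 because the distance wraps
     * around zero.
     */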
    if (compare_with_wrap(strm->last_sequence_delivered,
        control->sinfo_ssn, MAX_SEQ) ||
        (strm->last_sequence_delivered == control->sinfo_ssn)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
            control->sinfo_ssn, strm->last_sequence_delivered);
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);

            struct sctp_paramhdr *ph;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                (sizeof(uint32_t) * 3);
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);

            *ippp = control->sinfo_tsn;

            *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);

        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_PEER_FAULTY, oper);
    if (nxt_todel == control->sinfo_ssn) {
        /* can be delivered right away? */
#ifdef SCTP_STR_LOGGING
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
        asoc->size_on_all_streams -= control->length;
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_sequence_delivered++;
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            &stcb->sctp_socket->so_rcv, 1);
        control = TAILQ_FIRST(&strm->inqueue);
        while (control != NULL) {

            nxt_todel = strm->last_sequence_delivered + 1;
            if (nxt_todel == control->sinfo_ssn) {
                at = TAILQ_NEXT(control, next);
                TAILQ_REMOVE(&strm->inqueue, control, next);
                asoc->size_on_all_streams -= control->length;
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                strm->last_sequence_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
#ifdef SCTP_STR_LOGGING
                sctp_log_strm_del(control, NULL,
                    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    &stcb->sctp_socket->so_rcv, 1);
    /*
     * Ok, we did not deliver this guy, find the correct place
     * to put it on the queue.
     */
    if (TAILQ_EMPTY(&strm->inqueue)) {

#ifdef SCTP_STR_LOGGING
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next);

        TAILQ_FOREACH(at, &strm->inqueue, next) {
            if (compare_with_wrap(at->sinfo_ssn,
                control->sinfo_ssn, MAX_SEQ)) {
                /*
                 * one in queue is bigger than the
                 * new one, insert before this one
                 */
#ifdef SCTP_STR_LOGGING
                sctp_log_strm_del(control, at,
                    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
                TAILQ_INSERT_BEFORE(at, control, next);

            } else if (at->sinfo_ssn == control->sinfo_ssn) {
                /*
                 * Gak, he sent me a duplicate str
                 * seq number
                 */
                /*
                 * foo bar, I guess I will just free
                 * this new guy, should we abort
                 * too? FIX ME MAYBE? Or it COULD be
                 * that the SSN's have wrapped.
                 * Maybe I should compare to TSN
                 * somehow... sigh, for now just blow
                 * away the chunk!
                 */
                sctp_m_freem(control->data);
                control->data = NULL;
                asoc->size_on_all_streams -= control->length;
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                sctp_free_remote_addr(control->whoFrom);
                sctp_free_a_readq(stcb, control);

                if (TAILQ_NEXT(at, next) == NULL) {
                    /*
                     * We are at the end, insert
                     * it after this one
                     */
#ifdef SCTP_STR_LOGGING
                    sctp_log_strm_del(control, at,
                        SCTP_STR_LOG_FROM_INSERT_TL);
#endif
                    TAILQ_INSERT_AFTER(&strm->inqueue,
/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
    struct sctp_tmit_chunk *chk;

    chk = TAILQ_FIRST(&asoc->reasmqueue);

        /* nothing on the queue */

    if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
        /* Not a first on the queue */

    tsn = chk->rec.data.TSN_seq;

        if (tsn != chk->rec.data.TSN_seq) {

        *t_size += chk->send_size;
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {

        chk = TAILQ_NEXT(chk, sctp_next);
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;

    chk = TAILQ_FIRST(&asoc->reasmqueue);

        asoc->size_on_reasm_queue = 0;
        asoc->cnt_on_reasm_queue = 0;

    if (asoc->fragmented_delivery_inprogress == 0) {

            asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
        if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
            (nxt_todel == chk->rec.data.stream_seq ||
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
            /*
             * Yep the first one is here and it's ok to deliver
             */
            if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
                (tsize > stcb->sctp_ep->partial_delivery_point))) {
                /*
                 * Yes, we setup to start reception, by
                 * backing down the TSN just in case we
                 * can't deliver. If we
                 */
                asoc->fragmented_delivery_inprogress = 1;
                asoc->tsn_last_delivered =
                    chk->rec.data.TSN_seq - 1;

                    chk->rec.data.stream_number;
                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                asoc->pdapi_ppid = chk->rec.data.payloadtype;
                asoc->fragment_flags = chk->rec.data.rcv_flags;
                sctp_service_reassembly(stcb, asoc);

        /*
         * Service re-assembly will deliver stream data queued at
         * the end of fragmented delivery.. but it won't know to go
         * back and call itself again... we do that here with the
         */
        sctp_service_reassembly(stcb, asoc);
        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * finished our Fragmented delivery, could be more
             */
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
    uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;

    struct sctp_tmit_chunk *at, *prev, *next;

    cum_ackp1 = asoc->tsn_last_delivered + 1;
    if (TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* This is the first one on the queue */
        TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
        /*
         * we do not check for delivery of anything when only one
         * fragment is here
         */
        asoc->size_on_reasm_queue = chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        if (chk->rec.data.TSN_seq == cum_ackp1) {
            if (asoc->fragmented_delivery_inprogress == 0 &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
                SCTP_DATA_FIRST_FRAG) {
                /*
                 * An empty queue, no delivery inprogress,
                 * we hit the next one and it does NOT have
                 * a FIRST fragment mark.
                 */
                SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);

                    struct sctp_paramhdr *ph;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (sizeof(uint32_t) * 3);
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);

                    *ippp = chk->rec.data.TSN_seq;

                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
            } else if (asoc->fragmented_delivery_inprogress &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
                /*
                 * We are doing a partial delivery and the
                 * NEXT chunk MUST be either the LAST or
                 * MIDDLE fragment NOT a FIRST
                 */
                SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);

                    struct sctp_paramhdr *ph;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (3 * sizeof(uint32_t));
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);

                    *ippp = chk->rec.data.TSN_seq;

                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
            } else if (asoc->fragmented_delivery_inprogress) {
                /*
                 * Here we are ok with a MIDDLE or LAST
                 * piece
                 */
                if (chk->rec.data.stream_number !=
                    asoc->str_of_pdapi) {
                    /* Got to be the right STR No */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
                        chk->rec.data.stream_number,
                        asoc->str_of_pdapi);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (sizeof(uint32_t) * 3);
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    chk->rec.data.stream_seq !=
                    asoc->ssn_of_pdapi) {
                    /* Got to be the right STR Seq */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
                        chk->rec.data.stream_seq,
                        asoc->ssn_of_pdapi);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (compare_with_wrap(at->rec.data.TSN_seq,
            chk->rec.data.TSN_seq, MAX_TSN)) {
            /*
             * one in queue is bigger than the new one, insert
             * before this one
             */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);

            TAILQ_INSERT_BEFORE(at, chk, sctp_next);

        } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
            /* Gak, he sent me a duplicate str seq number */
            /*
             * foo bar, I guess I will just free this new guy,
             * should we abort too? FIX ME MAYBE? Or it COULD be
             * that the SSN's have wrapped. Maybe I should
             * compare to TSN somehow... sigh, for now just blow
             * away the chunk!
             */
            sctp_m_freem(chk->data);

            sctp_free_remote_addr(chk->whoTo);
            sctp_free_a_chunk(stcb, chk);
        last_flags = at->rec.data.rcv_flags;
        last_tsn = at->rec.data.TSN_seq;

        if (TAILQ_NEXT(at, sctp_next) == NULL) {
            /*
             * We are at the end, insert it after this
             * one
             */
            /* check it first */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
    /* Now the audits */

        prev_tsn = chk->rec.data.TSN_seq - 1;
        if (prev_tsn == prev->rec.data.TSN_seq) {
            /*
             * Ok the one I am dropping onto the end is the
             * NEXT. A bit of validation here.
             */
            if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_FIRST_FRAG ||
                (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG) {
                /*
                 * Insert chk MUST be a MIDDLE or LAST
                 * fragment
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_FIRST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                if (chk->rec.data.stream_number !=
                    prev->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                        chk->rec.data.stream_number,
                        prev->rec.data.stream_number);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    prev->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                        chk->rec.data.stream_seq,
                        prev->rec.data.stream_seq);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
            } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /* Insert chk MUST be a FIRST */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
        post_tsn = chk->rec.data.TSN_seq + 1;
        if (post_tsn == next->rec.data.TSN_seq) {
            /*
             * Ok the one I am inserting ahead of is my NEXT
             * one. A bit of validation here.
             */
            if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                /* Insert chk MUST be a last fragment */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
                    != SCTP_DATA_LAST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
            } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG ||
                (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /*
                 * Insert chk CAN be MIDDLE or FIRST NOT
                 * LAST
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_LAST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                if (chk->rec.data.stream_number !=
                    next->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                        chk->rec.data.stream_number,
                        next->rec.data.stream_number);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    next->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                        chk->rec.data.stream_seq,
                        next->rec.data.stream_seq);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);

                        *ippp = chk->rec.data.TSN_seq;

                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);

    /* Do we need to do some delivery? check */
    sctp_deliver_reasm_check(stcb, asoc);
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers, sigh.
 */
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,

    struct sctp_tmit_chunk *at;

    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (compare_with_wrap(TSN_seq,
            at->rec.data.TSN_seq, MAX_TSN)) {
            /* is it one bigger? */
            tsn_est = at->rec.data.TSN_seq + 1;
            if (tsn_est == TSN_seq) {
                /* yep. It better be a last then */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_LAST_FRAG) {
                    /*
                     * Ok this guy belongs next to a guy
                     * that is NOT last, it should be a
                     * middle/last, not a complete
                     * message
                     */

                    /*
                     * This guy is ok since it's a LAST
                     * and the new chunk is a fully
                     * self-contained one.
                     */

        } else if (TSN_seq == at->rec.data.TSN_seq) {
            /* Software error since I have a dup? */

            /*
             * Ok, 'at' is larger than new chunk but does it
             * need to be right before it?
             */
            tsn_est = TSN_seq + 1;
            if (tsn_est == at->rec.data.TSN_seq) {
                /* Yep, It better be a first */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
    /* Process a data chunk */
    /* struct sctp_tmit_chunk *chk; */
    struct sctp_tmit_chunk *chk;

    int need_reasm_check = 0;
    uint16_t strmno, strmseq;

    struct sctp_queued_to_read *control;

    uint32_t protocol_id;
    uint8_t chunk_flags;
    struct sctp_stream_reset_list *liste;

    tsn = ntohl(ch->dp.tsn);
    chunk_flags = ch->ch.chunk_flags;
    protocol_id = ch->dp.protocol_id;
    ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
#ifdef SCTP_MAP_LOGGING
    sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
#endif

    if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
        asoc->cumulative_tsn == tsn) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;

    /* Calculate the number of TSNs between the base and this TSN */
    if (tsn >= asoc->mapping_array_base_tsn) {
        gap = tsn - asoc->mapping_array_base_tsn;
    } else {
        gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
    }
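    /*
     * Added commentary (not original code): a worked example of the
     * wrap case above. With a 32-bit TSN space, if
     * mapping_array_base_tsn is 0xfffffffe and tsn is 1, then
     * (MAX_TSN - base) + tsn + 1 = 1 + 1 + 1 = 3, i.e. this TSN sits
     * three bit positions past the base in the mapping array.
     */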
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */

    if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
        if (sctp_expand_mapping_array(asoc)) {
            /* Can't expand, drop it */

    if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {

    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;

        asoc->send_sack = 1;

    /*
     * Check to see about the GONE flag, duplicates would cause a sack
     * to be sent up above
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
        ) {
        /*
         * wait a minute, this guy is gone, there is no longer a
         * receiver. Send peer an ABORT!
         */
        struct mbuf *op_err;

        op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
        sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
    /*
     * Now before going further we see if there is room. If NOT then we
     * MAY let one through only IF this TSN is the one we are waiting
     * for on a partial delivery API.
     */

    /* now do the tests */
    if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
        (((int)asoc->my_rwnd) <= 0)) {
        /*
         * When we have NO room in the rwnd we check to make sure
         * the reader is doing its job...
         */
        if (stcb->sctp_socket->so_rcv.sb_cc) {
            /* some to read, wake-up */
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
        }
        /* now is it in the mapping array of what we have accepted? */
        if (compare_with_wrap(tsn,
            asoc->highest_tsn_inside_map, MAX_TSN)) {

            /* Nope not in the valid range dump it */
            SCTPDBG(SCTP_DEBUG_INDATA1, "My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
                (u_long)tsn, (u_long)asoc->my_rwnd,
                sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
            sctp_set_rwnd(stcb, asoc);
            if ((asoc->cnt_on_all_streams +
                asoc->cnt_on_reasm_queue +
                asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
                SCTP_STAT_INCR(sctps_datadropchklmt);
            } else {
                SCTP_STAT_INCR(sctps_datadroprwnd);
            }
    strmno = ntohs(ch->dp.stream_id);
    if (strmno >= asoc->streamincnt) {
        struct sctp_paramhdr *phdr;

        mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
            0, M_DONTWAIT, 1, MT_DATA);

            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
            phdr = mtod(mb, struct sctp_paramhdr *);
            /*
             * Error causes are just param's and this one has
             * two back to back phdr, one with the error type
             * and size, the other with the streamid and a rsvd
             * field
             */
            SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
            phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
            phdr->param_length =
                htons(sizeof(struct sctp_paramhdr) * 2);
            phdr++;
            /* We insert the stream in the type field */
            phdr->param_type = ch->dp.stream_id;
            /* And set the length to 0 for the rsvd field */
            phdr->param_length = 0;
            sctp_queue_op_err(stcb, mb);

        SCTP_STAT_INCR(sctps_badsid);
        SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
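        /*
         * Added commentary (not original code): even though the stream
         * id was invalid, the TSN is still marked as received above so
         * it is covered by our SACKs and the peer does not endlessly
         * retransmit a chunk we will never deliver.
         */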
        if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
            /* we have a new high score */
            asoc->highest_tsn_inside_map = tsn;
#ifdef SCTP_MAP_LOGGING
            sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
        }
        if (tsn == (asoc->cumulative_tsn + 1)) {
            /* Update cum-ack */
            asoc->cumulative_tsn = tsn;

    /*
     * Before we continue let's validate that we are not being fooled by
     * an evil attacker. We can only have 4k chunks based on our TSN
     * spread allowed by the mapping array 512 * 8 bits, so there is no
     * way our stream sequence numbers could have wrapped. We of course
     * only validate the FIRST fragment so the bit must be set.
     */
    strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
    asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
    asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
    asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
    asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
    asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;

    if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
        asoc->tsn_in_at = 0;
        asoc->tsn_in_wrapped = 1;
    if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
        (TAILQ_EMPTY(&asoc->resetHead)) &&
        (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
        (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
        strmseq, MAX_SEQ) ||
        asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
            strmseq, asoc->strmin[strmno].last_sequence_delivered);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);

            struct sctp_paramhdr *ph;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                (3 * sizeof(uint32_t));
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);

            *ippp = ((strmno << 16) | strmseq);

        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_PEER_FAULTY, oper);
    /************************************
     * From here down we may find ch-> invalid
     * so it's a good idea NOT to use it.
     *************************************/
    the_len = (chk_length - sizeof(struct sctp_data_chunk));
    if (last_chunk == 0) {
        dmbuf = SCTP_M_COPYM(*m,
            (offset + sizeof(struct sctp_data_chunk)),
            the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING

            if (SCTP_BUF_IS_EXTENDED(mat)) {
                sctp_log_mb(mat, SCTP_MBUF_ICOPY);

            mat = SCTP_BUF_NEXT(mat);

#endif
    } else {
        /* We can steal the last chunk */

        /* lop off the top part */
        m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
        if (SCTP_BUF_NEXT(dmbuf) == NULL) {
            l_len = SCTP_BUF_LEN(dmbuf);

            /*
             * need to count up the size, hopefully does not hit
             */

                l_len += SCTP_BUF_LEN(lat);
                lat = SCTP_BUF_NEXT(lat);

        if (l_len > the_len) {
            /* Trim the excess bytes off the end too */
            m_adj(dmbuf, -(l_len - the_len));
    if (dmbuf == NULL) {
        SCTP_STAT_INCR(sctps_nomem);

    if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
        asoc->fragmented_delivery_inprogress == 0 &&
        TAILQ_EMPTY(&asoc->resetHead) &&

        ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
        TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
        /* Candidate for express delivery */
        /*
         * It's not fragmented, No PD-API is up, Nothing in the
         * delivery queue, It's unordered OR ordered and the next to
         * deliver AND nothing else is stuck on the stream queue,
         * And there is room for it in the socket buffer. Let's just
         * stuff it up the buffer....
         */

        /* It would be nice to avoid this copy if we could :< */
        sctp_alloc_a_readq(stcb, control);
        sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,

        if (control == NULL) {
            goto failed_express_del;
        }
        sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1);
        if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
            /* for ordered, bump what we delivered */
            asoc->strmin[strmno].last_sequence_delivered++;
        }
        SCTP_STAT_INCR(sctps_recvexpress);
#ifdef SCTP_STR_LOGGING
        sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
            SCTP_STR_LOG_FROM_EXPRS_DEL);
#endif

        goto finish_express_del;
    /* If we reach here this is a new chunk */

    /* Express for fragmented delivery? */
    if ((asoc->fragmented_delivery_inprogress) &&
        (stcb->asoc.control_pdapi) &&
        (asoc->str_of_pdapi == strmno) &&
        (asoc->ssn_of_pdapi == strmseq)
        ) {
        control = stcb->asoc.control_pdapi;
        if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
            /* Can't be another first? */
            goto failed_pdapi_express_del;
        }
        if (tsn == (control->sinfo_tsn + 1)) {
            /* Yep, we can add it on */

            if (chunk_flags & SCTP_DATA_LAST_FRAG) {

            cumack = asoc->cumulative_tsn;
            if ((cumack + 1) == tsn)

            if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
                &stcb->sctp_socket->so_rcv)) {
                SCTP_PRINTF("Append fails end:%d\n", end);
                goto failed_pdapi_express_del;
            }
            SCTP_STAT_INCR(sctps_recvexpressm);
            control->sinfo_tsn = tsn;
            asoc->tsn_last_delivered = tsn;
            asoc->fragment_flags = chunk_flags;
            asoc->tsn_of_pdapi_last_delivered = tsn;
            asoc->last_flags_delivered = chunk_flags;
            asoc->last_strm_seq_delivered = strmseq;
            asoc->last_strm_no_delivered = strmno;

            /* clean up the flags and such */
            asoc->fragmented_delivery_inprogress = 0;
            if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
                asoc->strmin[strmno].last_sequence_delivered++;

            stcb->asoc.control_pdapi = NULL;
            if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
                /*
                 * There could be another message
                 * ready
                 */
                need_reasm_check = 1;

            goto finish_express_del;
failed_pdapi_express_del:

    if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
        sctp_alloc_a_chunk(stcb, chk);

            /* No memory so we drop the chunk */
            SCTP_STAT_INCR(sctps_nomem);
            if (last_chunk == 0) {
                /* we copied it, free the copy */
                sctp_m_freem(dmbuf);

        chk->rec.data.TSN_seq = tsn;
        chk->no_fr_allowed = 0;
        chk->rec.data.stream_seq = strmseq;
        chk->rec.data.stream_number = strmno;
        chk->rec.data.payloadtype = protocol_id;
        chk->rec.data.context = stcb->asoc.context;
        chk->rec.data.doing_fast_retransmit = 0;
        chk->rec.data.rcv_flags = chunk_flags;

        chk->send_size = the_len;

        atomic_add_int(&net->ref_count, 1);
    } else {
        sctp_alloc_a_readq(stcb, control);
        sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,

        if (control == NULL) {
            /* No memory so we drop the chunk */
            SCTP_STAT_INCR(sctps_nomem);
            if (last_chunk == 0) {
                /* we copied it, free the copy */
                sctp_m_freem(dmbuf);

        control->length = the_len;
    /* Mark it as received */
    /* Now queue it where it belongs */
    if (control != NULL) {
        /* First a sanity check */
        if (asoc->fragmented_delivery_inprogress) {
            /*
             * Ok, we have a fragmented delivery in progress if
             * this chunk is next to deliver OR belongs in our
             * view to the reassembly, the peer is evil or
             * broken.
             */
            uint32_t estimate_tsn;

            estimate_tsn = asoc->tsn_last_delivered + 1;
            if (TAILQ_EMPTY(&asoc->reasmqueue) &&
                (estimate_tsn == control->sinfo_tsn)) {
                /* Evil/Broke peer */
                sctp_m_freem(control->data);
                control->data = NULL;
                sctp_free_remote_addr(control->whoFrom);
                sctp_free_a_readq(stcb, control);
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);

                    struct sctp_paramhdr *ph;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (3 * sizeof(uint32_t));
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);

                    *ippp = ((strmno << 16) | strmseq);

                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
            if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
                sctp_m_freem(control->data);
                control->data = NULL;
                sctp_free_remote_addr(control->whoFrom);
                sctp_free_a_readq(stcb, control);

                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);

                    struct sctp_paramhdr *ph;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (3 * sizeof(uint32_t));
                    ph = mtod(oper,
                        struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length =
                        htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);

                    *ippp = ((strmno << 16) | strmseq);

                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
                sctp_abort_an_association(stcb->sctp_ep,
                    stcb, SCTP_PEER_FAULTY, oper);
        } else {
            /* No PDAPI running */
            if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
                /*
                 * Reassembly queue is NOT empty; validate
                 * that this tsn does not need to be in the
                 * reassembly queue. If it does, then our peer
                 * is broken or evil.
                 */
                if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
                    sctp_m_freem(control->data);
                    control->data = NULL;
                    sctp_free_remote_addr(control->whoFrom);
                    sctp_free_a_readq(stcb, control);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);

                        struct sctp_paramhdr *ph;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);

                        *ippp = ((strmno << 16) | strmseq);

                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
        /* ok, if we reach here we have passed the sanity checks */
        if (chunk_flags & SCTP_DATA_UNORDERED) {
            /* queue directly into socket buffer */
            sctp_add_to_readq(stcb->sctp_ep, stcb,
                &stcb->sctp_socket->so_rcv, 1);
        } else {
            /*
             * Special check for when streams are resetting. We
             * could be smarter about this and check the
             * actual stream to see if it is not being reset...
             * that way we would not create a HOLB when amongst
             * streams being reset and those not being reset.
             *
             * We take complete messages that have a stream reset
             * intervening (aka the TSN is after where our
             * cum-ack needs to be) off and put them on a
             * pending_reply_queue. The reassembly ones we do
             * not have to worry about since they are all sorted
             * and processed by TSN order. It is only the
             * singletons I must worry about.
             */
            if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
                ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
                ) {
                /*
                 * yep, it's past where we need to reset... go
                 * ahead and queue it.
                 */
                if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {

                    TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
                } else {
                    struct sctp_queued_to_read *ctlOn;
                    unsigned char inserted = 0;

                    ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);

                        if (compare_with_wrap(control->sinfo_tsn,
                            ctlOn->sinfo_tsn, MAX_TSN)) {
                            ctlOn = TAILQ_NEXT(ctlOn, next);

                            TAILQ_INSERT_BEFORE(ctlOn, control, next);

                    if (inserted == 0) {
                        /*
                         * must be put at end, use
                         * prevP (all setup from
                         * loop) to setup nextP.
                         */
                        TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);

                sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
        /* Into the re-assembly queue */
        sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);

            /*
             * the assoc is now gone and chk was put onto the
             * reasm queue, which has all been freed.
             */

    if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
        /* we have a new high score */
        asoc->highest_tsn_inside_map = tsn;
#ifdef SCTP_MAP_LOGGING
        sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
    }
    if (tsn == (asoc->cumulative_tsn + 1)) {
        /* Update cum-ack */
        asoc->cumulative_tsn = tsn;
        SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);

        SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);

    SCTP_STAT_INCR(sctps_recvdata);
    /* Set it present please */
#ifdef SCTP_STR_LOGGING
    sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
#endif
#ifdef SCTP_MAP_LOGGING
    sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
#endif
    SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
    /* check the special flag for stream resets */
2094 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2095 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2096 (asoc->cumulative_tsn == liste->tsn))
2099 * we have finished working through the backlogged TSN's now
2100 * time to reset streams. 1: call reset function. 2: free
2101 * pending_reply space 3: distribute any chunks in
2102 * pending_reply_queue.
2104 struct sctp_queued_to_read *ctl;
2106 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2107 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2109 /* sa_ignore FREED_MEMORY */
2110 liste = TAILQ_FIRST(&asoc->resetHead);
2111 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2112 if (ctl && (liste == NULL)) {
2113 /* All can be removed */
2115 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2116 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2120 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2123 /* more than one in queue */
2124 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2126 * if ctl->sinfo_tsn is <= liste->tsn we can
2127 * process it which is the NOT of
2128 * ctl->sinfo_tsn > liste->tsn
2130 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2131 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2135 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2139 * Now service re-assembly to pick up anything that has been
2140 * held on the reassembly queue.
2142 sctp_deliver_reasm_check(stcb, asoc);
2143 need_reasm_check = 0;
2145 if (need_reasm_check) {
2146 /* Another one waits ? */
2147 sctp_deliver_reasm_check(stcb, asoc);
2152 int8_t sctp_map_lookup_tab[256] = {
2153 -1, 0, -1, 1, -1, 0, -1, 2,
2154 -1, 0, -1, 1, -1, 0, -1, 3,
2155 -1, 0, -1, 1, -1, 0, -1, 2,
2156 -1, 0, -1, 1, -1, 0, -1, 4,
2157 -1, 0, -1, 1, -1, 0, -1, 2,
2158 -1, 0, -1, 1, -1, 0, -1, 3,
2159 -1, 0, -1, 1, -1, 0, -1, 2,
2160 -1, 0, -1, 1, -1, 0, -1, 5,
2161 -1, 0, -1, 1, -1, 0, -1, 2,
2162 -1, 0, -1, 1, -1, 0, -1, 3,
2163 -1, 0, -1, 1, -1, 0, -1, 2,
2164 -1, 0, -1, 1, -1, 0, -1, 4,
2165 -1, 0, -1, 1, -1, 0, -1, 2,
2166 -1, 0, -1, 1, -1, 0, -1, 3,
2167 -1, 0, -1, 1, -1, 0, -1, 2,
2168 -1, 0, -1, 1, -1, 0, -1, 6,
2169 -1, 0, -1, 1, -1, 0, -1, 2,
2170 -1, 0, -1, 1, -1, 0, -1, 3,
2171 -1, 0, -1, 1, -1, 0, -1, 2,
2172 -1, 0, -1, 1, -1, 0, -1, 4,
2173 -1, 0, -1, 1, -1, 0, -1, 2,
2174 -1, 0, -1, 1, -1, 0, -1, 3,
2175 -1, 0, -1, 1, -1, 0, -1, 2,
2176 -1, 0, -1, 1, -1, 0, -1, 5,
2177 -1, 0, -1, 1, -1, 0, -1, 2,
2178 -1, 0, -1, 1, -1, 0, -1, 3,
2179 -1, 0, -1, 1, -1, 0, -1, 2,
2180 -1, 0, -1, 1, -1, 0, -1, 4,
2181 -1, 0, -1, 1, -1, 0, -1, 2,
2182 -1, 0, -1, 1, -1, 0, -1, 3,
2183 -1, 0, -1, 1, -1, 0, -1, 2,
2184 -1, 0, -1, 1, -1, 0, -1, 7,
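/*
 * What the table above encodes: for a byte b of the mapping array,
 * sctp_map_lookup_tab[b] is the index of the highest bit in the run of
 * consecutive 1-bits starting at bit 0 of b, or -1 if bit 0 is clear.
 * This lets the scan in sctp_sack_check() cross a partially-filled byte
 * with a single lookup instead of testing bits one at a time.  A sketch
 * of the equivalent computation (illustrative only, not compiled):
 */
#if 0
static int8_t
sctp_map_lookup_slow(uint8_t b)
{
	int8_t i;

	/* walk the low-order run of 1-bits; stop at the first 0 */
	for (i = 0; i < 8; i++) {
		if ((b & (1 << i)) == 0)
			return (i - 1);
	}
	return (7);		/* all eight bits set */
}
/* e.g. b = 0x17 (binary 00010111): bits 0-2 set, bit 3 clear -> 2 */
#endif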
2189 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2192 * Now we also need to check the mapping array in a couple of ways.
2193 * 1) Did we move the cum-ack point?
2195 struct sctp_association *asoc;
2197 int all_ones, last_all_ones = 0;
2198 int slide_from, slide_end, lgap, distance;
2200 #ifdef SCTP_MAP_LOGGING
2201 uint32_t old_cumack, old_base, old_highest;
2202 unsigned char aux_array[64];
2209 #ifdef SCTP_MAP_LOGGING
2210 old_cumack = asoc->cumulative_tsn;
2211 old_base = asoc->mapping_array_base_tsn;
2212 old_highest = asoc->highest_tsn_inside_map;
2213 if (asoc->mapping_array_size < 64)
2214 memcpy(aux_array, asoc->mapping_array,
2215 asoc->mapping_array_size);
2217 memcpy(aux_array, asoc->mapping_array, 64);
2221 * We could probably improve this a bit by calculating the
2222 * offset of the current cum-ack as the starting point.
2226 for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
2228 if (asoc->mapping_array[i] == 0xff) {
2232 /* there is a 0 bit */
2234 at += sctp_map_lookup_tab[asoc->mapping_array[i]];
2239 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2240 /* at is one off, since in the table an embedded -1 is present */
2243 if (compare_with_wrap(asoc->cumulative_tsn,
2244 asoc->highest_tsn_inside_map,
2247 panic("huh, cumack greater than high-tsn in map");
2249 SCTP_PRINTF("huh, cumack greater than high-tsn in map - should panic?\n");
2250 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2254 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2255 /* The complete array was completed by a single FR */
2256 /* highest becomes the cum-ack */
2259 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2260 /* clear the array */
2262 clr = asoc->mapping_array_size;
2264 clr = (at >> 3) + 1;
2266 * this should be the all-ones case, but just in case
2269 if (clr > asoc->mapping_array_size)
2270 clr = asoc->mapping_array_size;
2272 memset(asoc->mapping_array, 0, clr);
2273 /* base becomes one ahead of the cum-ack */
2274 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2275 #ifdef SCTP_MAP_LOGGING
2276 sctp_log_map(old_base, old_cumack, old_highest,
2277 SCTP_MAP_PREPARE_SLIDE);
2278 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2279 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2281 } else if (at >= 8) {
2282 /* we can slide the mapping array down */
2283 /* Calculate the new byte position we can move down */
2284 slide_from = at >> 3;
2286 * now calculate the ceiling of the move using our highest
2289 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2290 lgap = asoc->highest_tsn_inside_map -
2291 asoc->mapping_array_base_tsn;
2293 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2294 asoc->highest_tsn_inside_map + 1;
2296 slide_end = lgap >> 3;
2297 if (slide_end < slide_from) {
2298 panic("impossible slide");
2300 distance = (slide_end - slide_from) + 1;
2301 #ifdef SCTP_MAP_LOGGING
2302 sctp_log_map(old_base, old_cumack, old_highest,
2303 SCTP_MAP_PREPARE_SLIDE);
2304 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2305 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2307 if (distance + slide_from > asoc->mapping_array_size ||
2310 * Here we do NOT slide forward the array so that
2311 * hopefully when more data comes in to fill it up
2312 * we will be able to slide it forward. Really I
2313 * don't think this should happen :-0
2316 #ifdef SCTP_MAP_LOGGING
2317 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2318 (uint32_t) asoc->mapping_array_size,
2319 SCTP_MAP_SLIDE_NONE);
2324 for (ii = 0; ii < distance; ii++) {
2325 asoc->mapping_array[ii] =
2326 asoc->mapping_array[slide_from + ii];
2328 for (ii = distance; ii <= slide_end; ii++) {
2329 asoc->mapping_array[ii] = 0;
2331 asoc->mapping_array_base_tsn += (slide_from << 3);
2332 #ifdef SCTP_MAP_LOGGING
2333 sctp_log_map(asoc->mapping_array_base_tsn,
2334 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2335 SCTP_MAP_SLIDE_RESULT);
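/*
 * Illustrative numbers (assumed, not from the source): with at = 10
 * bits set, slide_from = 10 >> 3 = 1; if the highest TSN sits 25 past
 * the base, lgap = 25 and slide_end = 3, so distance = 3.  Bytes 1-3
 * are copied down to 0-2, byte 3 is zeroed, and mapping_array_base_tsn
 * advances by slide_from << 3 = 8 TSNs.
 */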
2340 * Now we need to see if we need to queue a sack or just start the
2341 * timer (if allowed).
2344 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2346 * Ok, special case in the SHUTDOWN-SENT state: here we
2347 * make sure the SACK timer is off and instead send a
2348 * SHUTDOWN and a SACK
2350 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2351 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2352 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2354 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2355 sctp_send_sack(stcb);
2359 /* is there a gap now ? */
2360 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2361 stcb->asoc.cumulative_tsn, MAX_TSN);
2364 * CMT DAC algorithm: increase number of packets
2365 * received since last ack
2367 stcb->asoc.cmt_dac_pkts_rcvd++;
2369 if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK anyway */
2371 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2373 (stcb->asoc.numduptsns) || /* we have dup's */
2374 (is_a_gap) || /* is still a gap */
2375 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2376 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2379 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) &&
2380 (stcb->asoc.send_sack == 0) &&
2381 (stcb->asoc.numduptsns == 0) &&
2382 (stcb->asoc.delayed_ack) &&
2383 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2386 * CMT DAC algorithm: With CMT,
2387 * delay acks even in the face of
2389 * reordering. Therefore, acks that
2390 * do not have to be sent because of
2391 * the above reasons will be
2392 * delayed. That is, acks that would
2393 * have been sent due to gap reports
2394 * will be delayed with DAC. Start
2395 * the delayed ack timer.
2397 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2398 stcb->sctp_ep, stcb, NULL);
2401 * Ok we must build a SACK since the
2402 * timer is pending, we got our
2403 * first packet OR there are gaps or
2406 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2407 sctp_send_sack(stcb);
2410 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2411 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2412 stcb->sctp_ep, stcb, NULL);
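/*
 * Summarizing the decision above: a SACK goes out immediately when one
 * is already owed (send_sack), when a gap just closed or still exists,
 * when duplicate TSNs were seen, when delayed acks are disabled, or
 * when the data_pkts_seen limit (sack_freq) is reached; otherwise the
 * delayed-ack timer is started if it is not already pending.  The CMT
 * DAC branch above instead delays even the gap-induced acks.
 */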
2420 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2422 struct sctp_tmit_chunk *chk;
2426 if (asoc->fragmented_delivery_inprogress) {
2427 sctp_service_reassembly(stcb, asoc);
2429 /* Can we proceed further, i.e., is the PD-API complete? */
2430 if (asoc->fragmented_delivery_inprogress) {
2435 * Now is there some other chunk I can deliver from the reassembly
2439 chk = TAILQ_FIRST(&asoc->reasmqueue);
2441 asoc->size_on_reasm_queue = 0;
2442 asoc->cnt_on_reasm_queue = 0;
2445 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2446 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2447 ((nxt_todel == chk->rec.data.stream_seq) ||
2448 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2450 * Yep the first one is here. We set up to start reception
2451 * by backing down the TSN, just in case we can't deliver.
2455 * Before we start, though, either all of the message should
2456 * be here, or 1/4 of the socket buffer max, or nothing is on the
2457 * delivery queue and something can be delivered.
2459 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2460 (tsize > stcb->sctp_ep->partial_delivery_point))) {
2461 asoc->fragmented_delivery_inprogress = 1;
2462 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2463 asoc->str_of_pdapi = chk->rec.data.stream_number;
2464 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2465 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2466 asoc->fragment_flags = chk->rec.data.rcv_flags;
2467 sctp_service_reassembly(stcb, asoc);
2468 if (asoc->fragmented_delivery_inprogress == 0) {
2476 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2477 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2478 struct sctp_nets *net, uint32_t * high_tsn)
2480 struct sctp_data_chunk *ch, chunk_buf;
2481 struct sctp_association *asoc;
2482 int num_chunks = 0; /* number of control chunks processed */
2484 int chk_length, break_flag, last_chunk;
2485 int abort_flag = 0, was_a_gap = 0;
2489 sctp_set_rwnd(stcb, &stcb->asoc);
2492 SCTP_TCB_LOCK_ASSERT(stcb);
2494 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
2495 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
2496 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
2498 * wait a minute, this guy is gone, there is no longer a
2499 * receiver. Send peer an ABORT!
2501 struct mbuf *op_err;
2503 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2504 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
2507 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2508 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2509 /* there was a gap before this data was processed */
2513 * setup where we got the last DATA packet from for any SACK that
2514 * may need to go out. Don't bump the net. This is done ONLY when a
2515 * chunk is assigned.
2517 asoc->last_data_chunk_from = net;
2520 * Now before we proceed we must figure out if this is a wasted
2521 * cluster... i.e. it is a small packet sent in and yet the driver
2522 * underneath allocated a full cluster for it. If so we must copy it
2523 * to a smaller mbuf and free up the cluster mbuf. This will help
2524 * with cluster starvation. Note for __Panda__ we don't do this
2525 * since it has clusters all the way down to 64 bytes.
2527 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2528 /* we only handle mbufs that are singletons.. not chains */
2529 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2531 /* ok, let's see if we can copy the data up */
2534 /* get the pointers and copy */
2535 to = mtod(m, caddr_t *);
2536 from = mtod((*mm), caddr_t *);
2537 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2538 /* copy the length and free up the old */
2539 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2541 /* success, back copy */
2544 /* We are in trouble in the mbuf world .. yikes */
2548 /* get pointer to the first chunk header */
2549 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2550 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
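/*
 * Note: sctp_m_getptr() is expected to hand back a pointer to a
 * contiguous copy of the requested bytes, with chunk_buf as backing
 * storage for the case where the chunk header straddles mbuf
 * boundaries, so ch can be dereferenced either way.
 */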
2555 * process all DATA chunks...
2557 *high_tsn = asoc->cumulative_tsn;
2559 asoc->data_pkts_seen++;
2560 while (stop_proc == 0) {
2561 /* validate chunk length */
2562 chk_length = ntohs(ch->ch.chunk_length);
2563 if (length - *offset < chk_length) {
2564 /* all done, mutilated chunk */
2568 if (ch->ch.chunk_type == SCTP_DATA) {
2569 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2571 * Need to send an abort since we had an
2572 * invalid data chunk.
2574 struct mbuf *op_err;
2576 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2577 0, M_DONTWAIT, 1, MT_DATA);
2580 struct sctp_paramhdr *ph;
2583 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2584 (2 * sizeof(uint32_t));
2585 ph = mtod(op_err, struct sctp_paramhdr *);
2587 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2588 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2589 ippp = (uint32_t *) (ph + 1);
2590 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2592 *ippp = asoc->cumulative_tsn;
2595 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2596 sctp_abort_association(inp, stcb, m, iphlen, sh,
2600 #ifdef SCTP_AUDITING_ENABLED
2601 sctp_audit_log(0xB1, 0);
2603 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2608 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2609 chk_length, net, high_tsn, &abort_flag, &break_flag,
2618 * Set because we are out of rwnd space and
2619 * no drop report space is left.
2625 /* not a data chunk in the data region */
2626 switch (ch->ch.chunk_type) {
2627 case SCTP_INITIATION:
2628 case SCTP_INITIATION_ACK:
2629 case SCTP_SELECTIVE_ACK:
2630 case SCTP_HEARTBEAT_REQUEST:
2631 case SCTP_HEARTBEAT_ACK:
2632 case SCTP_ABORT_ASSOCIATION:
2634 case SCTP_SHUTDOWN_ACK:
2635 case SCTP_OPERATION_ERROR:
2636 case SCTP_COOKIE_ECHO:
2637 case SCTP_COOKIE_ACK:
2640 case SCTP_SHUTDOWN_COMPLETE:
2641 case SCTP_AUTHENTICATION:
2642 case SCTP_ASCONF_ACK:
2643 case SCTP_PACKET_DROPPED:
2644 case SCTP_STREAM_RESET:
2645 case SCTP_FORWARD_CUM_TSN:
2648 * Now, what do we do with KNOWN chunks that
2649 * are NOT in the right place?
2651 * For now, I do nothing but ignore them. We
2652 * may later want to add sysctl stuff to
2653 * switch out and do either an ABORT() or
2654 * possibly process them.
2656 if (sctp_strict_data_order) {
2657 struct mbuf *op_err;
2659 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2660 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, 0);
2665 /* unknown chunk type, use bit rules */
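/*
 * The "bit rules" are the standard SCTP ones (RFC 2960/4960,
 * section 3.2): the two high-order bits of an unrecognized chunk
 * type say what to do with it.  Bit 0x40 set means report the
 * chunk in an ERROR; bit 0x80 set means skip it and keep
 * processing, clear means discard the rest of the packet.
 */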
2666 if (ch->ch.chunk_type & 0x40) {
2667 /* Add an error report to the queue */
2669 struct sctp_paramhdr *phd;
2671 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2673 phd = mtod(merr, struct sctp_paramhdr *);
2675 * We cheat and use param
2676 * type since we did not
2677 * bother to define an error
2678 * cause struct. They are
2679 * the same basic format
2680 * with different names.
2683 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2685 htons(chk_length + sizeof(*phd));
2686 SCTP_BUF_LEN(merr) = sizeof(*phd);
2687 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2688 SCTP_SIZE32(chk_length),
2690 if (SCTP_BUF_NEXT(merr)) {
2691 sctp_queue_op_err(stcb, merr);
2697 if ((ch->ch.chunk_type & 0x80) == 0) {
2698 /* discard the rest of this packet */
2700 } /* else skip this bad chunk and continue with the next */
2703 }; /* switch of chunk type */
2705 *offset += SCTP_SIZE32(chk_length);
2706 if ((*offset >= length) || stop_proc) {
2707 /* no more data left in the mbuf chain */
2711 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2712 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2722 * we need to report rwnd overrun drops.
2724 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2728 * Did we get data? If so, update the time for auto-close and
2729 * give the peer credit for being alive.
2731 SCTP_STAT_INCR(sctps_recvpktwithdata);
2732 stcb->asoc.overall_error_count = 0;
2733 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2735 /* now service all of the reassm queue if needed */
2736 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2737 sctp_service_queues(stcb, asoc);
2739 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2740 /* Assure that we ack right away */
2741 stcb->asoc.send_sack = 1;
2743 /* Start a sack timer or QUEUE a SACK for sending */
2744 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
2745 (stcb->asoc.mapping_array[0] != 0xff)) {
2746 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
2747 (stcb->asoc.delayed_ack == 0) ||
2748 (stcb->asoc.send_sack == 1)) {
2749 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2750 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2752 sctp_send_sack(stcb);
2754 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2755 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2756 stcb->sctp_ep, stcb, NULL);
2760 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2769 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2770 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2771 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2772 int num_seg, int *ecn_seg_sums)
2774 /************************************************/
2775 /* process fragments and update sendqueue */
2776 /************************************************/
2777 struct sctp_sack *sack;
2778 struct sctp_gap_ack_block *frag;
2779 struct sctp_tmit_chunk *tp1;
2783 #ifdef SCTP_FR_LOGGING
2787 uint16_t frag_strt, frag_end, primary_flag_set;
2788 u_long last_frag_high;
2791 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
2793 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2794 primary_flag_set = 1;
2796 primary_flag_set = 0;
2800 frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
2801 sizeof(struct sctp_sack));
2804 for (i = 0; i < num_seg; i++) {
2805 frag_strt = ntohs(frag->start);
2806 frag_end = ntohs(frag->end);
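/*
 * Gap Ack Block offsets are relative: the block covers TSNs
 * (last_tsn + frag_strt) through (last_tsn + frag_end) inclusive,
 * where last_tsn is the cumulative TSN ack carried by this SACK.
 */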
2807 /* some sanity checks on the fragment offsets */
2808 if (frag_strt > frag_end) {
2809 /* this one is malformed, skip */
2813 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
2815 *biggest_tsn_acked = frag_end + last_tsn;
2817 /* mark acked dgs and find out the highest TSN being acked */
2819 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2821 /* save the locations of the last frags */
2822 last_frag_high = frag_end + last_tsn;
2825 * now let's see if we need to reset the queue due to
2826 * an out-of-order SACK fragment
2828 if (compare_with_wrap(frag_strt + last_tsn,
2829 last_frag_high, MAX_TSN)) {
2831 * if the new frag starts after the last TSN
2832 * frag covered, we are ok and this one is
2833 * beyond the last one
2838 * ok, they have reset us, so we need to
2839 * reset the queue; this will cause extra
2840 * hunting, but hey, they chose the
2841 * performance hit when they failed to order
2844 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2846 last_frag_high = frag_end + last_tsn;
2848 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2850 #ifdef SCTP_FR_LOGGING
2851 if (tp1->rec.data.doing_fast_retransmit)
2856 * CMT: CUCv2 algorithm. For each TSN being
2857 * processed from the sent queue, track the
2858 * next expected pseudo-cumack, or
2859 * rtx_pseudo_cumack, if required. Separate
2860 * cumack trackers for first transmissions,
2861 * and retransmissions.
2863 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2864 (tp1->snd_count == 1)) {
2865 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2866 tp1->whoTo->find_pseudo_cumack = 0;
2868 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2869 (tp1->snd_count > 1)) {
2870 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2871 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2873 if (tp1->rec.data.TSN_seq == j) {
2874 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2876 * must be held until
2880 * ECN Nonce: Add the nonce
2881 * value to the sender's
2884 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2886 * If it is less than RESEND, it is
2887 * now no longer in flight.
2888 * Higher values may already be set
2889 * via previous Gap Ack Blocks...
2890 * i.e. ACKED or RESEND.
2892 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2893 *biggest_newly_acked_tsn, MAX_TSN)) {
2894 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2903 * this_sack_highest_
2907 if (tp1->rec.data.chunk_was_revoked == 0)
2908 tp1->whoTo->saw_newack = 1;
2910 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2911 tp1->whoTo->this_sack_highest_newack,
2913 tp1->whoTo->this_sack_highest_newack =
2914 tp1->rec.data.TSN_seq;
2919 * this_sack_lowest_n
2922 if (*this_sack_lowest_newack == 0) {
2923 #ifdef SCTP_SACK_LOGGING
2924 sctp_log_sack(*this_sack_lowest_newack,
2926 tp1->rec.data.TSN_seq,
2929 SCTP_LOG_TSN_ACKED);
2931 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2936 * (rtx-)pseudo-cumac
2941 * (rtx-)pseudo-cumac
2943 * new_(rtx_)pseudo_c
2951 * (rtx-)pseudo-cumac
2959 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2960 if (tp1->rec.data.chunk_was_revoked == 0) {
2961 tp1->whoTo->new_pseudo_cumack = 1;
2963 tp1->whoTo->find_pseudo_cumack = 1;
2965 #ifdef SCTP_CWND_LOGGING
2966 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2968 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2969 if (tp1->rec.data.chunk_was_revoked == 0) {
2970 tp1->whoTo->new_pseudo_cumack = 1;
2972 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2974 #ifdef SCTP_SACK_LOGGING
2975 sctp_log_sack(*biggest_newly_acked_tsn,
2977 tp1->rec.data.TSN_seq,
2980 SCTP_LOG_TSN_ACKED);
2982 #ifdef SCTP_FLIGHT_LOGGING
2983 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2984 tp1->whoTo->flight_size,
2986 (uintptr_t) tp1->whoTo,
2987 tp1->rec.data.TSN_seq);
2989 sctp_flight_size_decrease(tp1);
2990 sctp_total_flight_decrease(stcb, tp1);
2992 tp1->whoTo->net_ack += tp1->send_size;
2993 if (tp1->snd_count < 2) {
2999 tp1->whoTo->net_ack2 += tp1->send_size;
3006 sctp_calculate_rto(stcb,
3009 &tp1->sent_rcv_time);
3014 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3015 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3016 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3017 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3018 asoc->this_sack_highest_gap,
3020 asoc->this_sack_highest_gap =
3021 tp1->rec.data.TSN_seq;
3023 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3024 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3025 #ifdef SCTP_AUDITING_ENABLED
3026 sctp_audit_log(0xB2,
3027 (asoc->sent_queue_retran_cnt & 0x000000ff));
3032 * All chunks NOT UNSENT
3033 * fall through here and are
3036 tp1->sent = SCTP_DATAGRAM_MARKED;
3037 if (tp1->rec.data.chunk_was_revoked) {
3038 /* deflate the cwnd */
3039 tp1->whoTo->cwnd -= tp1->book_size;
3040 tp1->rec.data.chunk_was_revoked = 0;
3044 } /* if (tp1->TSN_seq == j) */
3045 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
3049 tp1 = TAILQ_NEXT(tp1, sctp_next);
3050 } /* end while (tp1) */
3051 } /* end for (j = fragStart */
3052 frag++; /* next one */
3054 #ifdef SCTP_FR_LOGGING
3056 * if (num_frs) sctp_log_fr(*biggest_tsn_acked,
3057 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3063 sctp_check_for_revoked(struct sctp_tcb *stcb,
3064 struct sctp_association *asoc, uint32_t cumack,
3065 u_long biggest_tsn_acked)
3067 struct sctp_tmit_chunk *tp1;
3068 int tot_revoked = 0;
3070 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3072 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3075 * ok this guy is either ACK or MARKED. If it is
3076 * ACKED it has been previously acked but not this
3077 * time, i.e., revoked. If it is MARKED it was ACK'ed
3080 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3085 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3086 /* it has been revoked */
3087 tp1->sent = SCTP_DATAGRAM_SENT;
3088 tp1->rec.data.chunk_was_revoked = 1;
3090 * We must add this stuff back in to assure
3091 * timers and such get started.
3093 #ifdef SCTP_FLIGHT_LOGGING
3094 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3095 tp1->whoTo->flight_size,
3097 (uintptr_t) tp1->whoTo,
3098 tp1->rec.data.TSN_seq);
3100 sctp_flight_size_increase(tp1);
3101 sctp_total_flight_increase(stcb, tp1);
3103 * We inflate the cwnd to compensate for our
3104 * artificial inflation of the flight_size.
3106 tp1->whoTo->cwnd += tp1->book_size;
3108 #ifdef SCTP_SACK_LOGGING
3109 sctp_log_sack(asoc->last_acked_seq,
3111 tp1->rec.data.TSN_seq,
3114 SCTP_LOG_TSN_REVOKED);
3116 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3117 /* it has been re-acked in this SACK */
3118 tp1->sent = SCTP_DATAGRAM_ACKED;
3121 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3123 tp1 = TAILQ_NEXT(tp1, sctp_next);
3125 if (tot_revoked > 0) {
3127 * Setup the ecn nonce re-sync point. We do this since once
3128 * data is revoked we begin to retransmit things, which do
3129 * NOT have the ECN bits set. This means we are now out of
3130 * sync and must wait until we get back in sync with the
3131 * peer to check ECN bits.
3133 tp1 = TAILQ_FIRST(&asoc->send_queue);
3135 asoc->nonce_resync_tsn = asoc->sending_seq;
3137 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3139 asoc->nonce_wait_for_ecne = 0;
3140 asoc->nonce_sum_check = 0;
3145 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3146 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3148 struct sctp_tmit_chunk *tp1;
3149 int strike_flag = 0;
3151 int tot_retrans = 0;
3152 uint32_t sending_seq;
3153 struct sctp_nets *net;
3154 int num_dests_sacked = 0;
3157 * select the sending_seq, this is either the next thing ready to be
3158 * sent but not transmitted, OR the next seq we assign.
3160 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3162 sending_seq = asoc->sending_seq;
3164 sending_seq = tp1->rec.data.TSN_seq;
3167 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3168 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3169 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3170 if (net->saw_newack)
3174 if (stcb->asoc.peer_supports_prsctp) {
3175 (void)SCTP_GETTIME_TIMEVAL(&now);
3177 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3180 if (tp1->no_fr_allowed) {
3181 /* this one had a timeout or something */
3182 tp1 = TAILQ_NEXT(tp1, sctp_next);
3185 #ifdef SCTP_FR_LOGGING
3186 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3187 sctp_log_fr(biggest_tsn_newly_acked,
3188 tp1->rec.data.TSN_seq,
3190 SCTP_FR_LOG_CHECK_STRIKE);
3192 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3194 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3198 if (stcb->asoc.peer_supports_prsctp) {
3199 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3200 /* Is it expired? */
3202 (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3204 /* Yes so drop it */
3205 if (tp1->data != NULL) {
3206 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3207 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3210 tp1 = TAILQ_NEXT(tp1, sctp_next);
3214 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3215 /* Has it been retransmitted tv_sec times? */
3216 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3217 /* Yes, so drop it */
3218 if (tp1->data != NULL) {
3219 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3220 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3223 tp1 = TAILQ_NEXT(tp1, sctp_next);
3228 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3229 asoc->this_sack_highest_gap, MAX_TSN)) {
3230 /* we are beyond the tsn in the sack */
3233 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3234 /* either a RESEND, ACKED, or MARKED */
3236 tp1 = TAILQ_NEXT(tp1, sctp_next);
3240 * CMT : SFR algo (covers part of DAC and HTNA as well)
3242 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3244 * No new acks were received for data sent to this
3245 * dest. Therefore, according to the SFR algo for
3246 * CMT, no data sent to this dest can be marked for
3247 * FR using this SACK.
3249 tp1 = TAILQ_NEXT(tp1, sctp_next);
3251 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3252 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3254 * CMT: New acks were received for data sent to
3255 * this dest. But no new acks were seen for data
3256 * sent after tp1. Therefore, according to the SFR
3257 * algo for CMT, tp1 cannot be marked for FR using
3258 * this SACK. This step covers part of the DAC algo
3259 * and the HTNA algo as well.
3261 tp1 = TAILQ_NEXT(tp1, sctp_next);
3265 * Here we check to see if we have already done a FR
3266 * and if so we see if the biggest TSN we saw in the sack is
3267 * smaller than the recovery point. If so we don't strike
3268 * the tsn... otherwise we CAN strike the TSN.
3271 * @@@ JRI: Check for CMT if (accum_moved &&
3272 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3275 if (accum_moved && asoc->fast_retran_loss_recovery) {
3277 * Strike the TSN if in fast-recovery and cum-ack
3280 #ifdef SCTP_FR_LOGGING
3281 sctp_log_fr(biggest_tsn_newly_acked,
3282 tp1->rec.data.TSN_seq,
3284 SCTP_FR_LOG_STRIKE_CHUNK);
3286 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3289 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3291 * CMT DAC algorithm: If SACK flag is set to
3292 * 0, then lowest_newack test will not pass
3293 * because it would have been set to the
3294 * cumack earlier. If not already to be
3295 * rtx'd, if not a mixed sack, and if tp1 is
3296 * not between two sacked TSNs, then mark by
3297 * one more. NOTE that we are marking by one
3298 * additional time since the SACK DAC flag
3299 * indicates that two packets have been
3300 * received after this missing TSN.
3302 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3303 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3304 #ifdef SCTP_FR_LOGGING
3305 sctp_log_fr(16 + num_dests_sacked,
3306 tp1->rec.data.TSN_seq,
3308 SCTP_FR_LOG_STRIKE_CHUNK);
3313 } else if (tp1->rec.data.doing_fast_retransmit) {
3315 * For those that have done a FR we must take
3316 * special consideration if we strike. I.e., the
3317 * biggest_newly_acked must be higher than the
3318 * sending_seq at the time we did the FR.
3321 #ifdef SCTP_FR_TO_ALTERNATE
3323 * If FR's go to new networks, then we must only do
3324 * this for singly homed asoc's. However if the FR's
3325 * go to the same network (Armando's work) then it's
3326 * ok to FR multiple times.
3334 if ((compare_with_wrap(biggest_tsn_newly_acked,
3335 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3336 (biggest_tsn_newly_acked ==
3337 tp1->rec.data.fast_retran_tsn)) {
3339 * Strike the TSN, since this ack is
3340 * beyond where things were when we
3343 #ifdef SCTP_FR_LOGGING
3344 sctp_log_fr(biggest_tsn_newly_acked,
3345 tp1->rec.data.TSN_seq,
3347 SCTP_FR_LOG_STRIKE_CHUNK);
3349 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3353 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3355 * CMT DAC algorithm: If
3356 * SACK flag is set to 0,
3357 * then lowest_newack test
3358 * will not pass because it
3359 * would have been set to
3360 * the cumack earlier. If
3361 * not already to be rtx'd,
3362 * if not a mixed sack, and
3363 * if tp1 is not between two
3364 * sacked TSNs, then mark by
3365 * one more. NOTE that we
3366 * are marking by one
3367 * additional time since the
3368 * SACK DAC flag indicates
3369 * that two packets have
3370 * been received after this
3373 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3374 (num_dests_sacked == 1) &&
3375 compare_with_wrap(this_sack_lowest_newack,
3376 tp1->rec.data.TSN_seq, MAX_TSN)) {
3377 #ifdef SCTP_FR_LOGGING
3378 sctp_log_fr(32 + num_dests_sacked,
3379 tp1->rec.data.TSN_seq,
3381 SCTP_FR_LOG_STRIKE_CHUNK);
3383 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3392 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3395 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3396 biggest_tsn_newly_acked, MAX_TSN)) {
3398 * We don't strike these: This is the HTNA
3399 * algorithm, i.e., we don't strike if our TSN is
3400 * larger than the Highest TSN Newly Acked.
3404 /* Strike the TSN */
3405 #ifdef SCTP_FR_LOGGING
3406 sctp_log_fr(biggest_tsn_newly_acked,
3407 tp1->rec.data.TSN_seq,
3409 SCTP_FR_LOG_STRIKE_CHUNK);
3411 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3414 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3416 * CMT DAC algorithm: If SACK flag is set to
3417 * 0, then lowest_newack test will not pass
3418 * because it would have been set to the
3419 * cumack earlier. If not already to be
3420 * rtx'd, if not a mixed sack, and if tp1 is
3421 * not between two sacked TSNs, then mark by
3422 * one more. NOTE that we are marking by one
3423 * additional time since the SACK DAC flag
3424 * indicates that two packets have been
3425 * received after this missing TSN.
3427 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3428 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3429 #ifdef SCTP_FR_LOGGING
3430 sctp_log_fr(48 + num_dests_sacked,
3431 tp1->rec.data.TSN_seq,
3433 SCTP_FR_LOG_STRIKE_CHUNK);
3439 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3440 /* Increment the count to resend */
3441 struct sctp_nets *alt;
3443 /* printf("OK, we are now ready to FR this guy\n"); */
3444 #ifdef SCTP_FR_LOGGING
3445 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3449 /* This is a subsequent FR */
3450 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3452 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3453 if (sctp_cmt_on_off) {
3455 * CMT: Using RTX_SSTHRESH policy for CMT.
3456 * If CMT is being used, then pick dest with
3457 * largest ssthresh for any retransmission.
3459 tp1->no_fr_allowed = 1;
3461 /* sa_ignore NO_NULL_CHK */
3462 alt = sctp_find_alternate_net(stcb, alt, 1);
3467 * CUCv2: If a different dest is picked for
3468 * the retransmission, then new
3469 * (rtx-)pseudo_cumack needs to be tracked
3470 * for orig dest. Let CUCv2 track new (rtx-)
3471 * pseudo-cumack always.
3474 tp1->whoTo->find_pseudo_cumack = 1;
3475 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3477 } else {/* CMT is OFF */
3479 #ifdef SCTP_FR_TO_ALTERNATE
3480 /* Can we find an alternate? */
3481 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3484 * default behavior is to NOT retransmit
3485 * FR's to an alternate. Armando Caro's
3486 * paper details why.
3492 tp1->rec.data.doing_fast_retransmit = 1;
3494 /* mark the sending seq for possible subsequent FR's */
3496 * printf("Marking TSN for FR new value %x\n",
3497 * (uint32_t)tpi->rec.data.TSN_seq);
3499 if (TAILQ_EMPTY(&asoc->send_queue)) {
3501 * If the send queue is empty then it's
3502 * the next sequence number that will be
3503 * assigned, so we subtract one from this to
3504 * get the one we last sent.
3506 tp1->rec.data.fast_retran_tsn = sending_seq;
3509 * If there are chunks on the send queue
3510 * (unsent data that has made it from the
3511 * stream queues but not out the door), we
3512 * take the first one (which will have the
3513 * lowest TSN) and subtract one to get the
3516 struct sctp_tmit_chunk *ttt;
3518 ttt = TAILQ_FIRST(&asoc->send_queue);
3519 tp1->rec.data.fast_retran_tsn =
3520 ttt->rec.data.TSN_seq;
3525 * this guy had an RTO calculation pending on
3530 /* fix counts and things */
3531 #ifdef SCTP_FLIGHT_LOGGING
3532 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3533 tp1->whoTo->flight_size,
3535 (uintptr_t) tp1->whoTo,
3536 tp1->rec.data.TSN_seq);
3539 tp1->whoTo->net_ack++;
3540 sctp_flight_size_decrease(tp1);
3542 #ifdef SCTP_LOG_RWND
3543 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3544 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
3546 /* add back to the rwnd */
3547 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3549 /* remove from the total flight */
3550 sctp_total_flight_decrease(stcb, tp1);
3551 if (alt != tp1->whoTo) {
3552 /* yes, there is an alternate. */
3553 sctp_free_remote_addr(tp1->whoTo);
3554 /* sa_ignore FREED_MEMORY */
3556 atomic_add_int(&alt->ref_count, 1);
3559 tp1 = TAILQ_NEXT(tp1, sctp_next);
3562 if (tot_retrans > 0) {
3564 * Setup the ecn nonce re-sync point. We do this since once
3565 * we go to FR something we introduce a Karn's rule scenario
3566 * and won't know the totals for the ECN bits.
3568 asoc->nonce_resync_tsn = sending_seq;
3569 asoc->nonce_wait_for_ecne = 0;
3570 asoc->nonce_sum_check = 0;
3574 struct sctp_tmit_chunk *
3575 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3576 struct sctp_association *asoc)
3578 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3582 if (asoc->peer_supports_prsctp == 0) {
3585 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3587 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3588 tp1->sent != SCTP_DATAGRAM_RESEND) {
3589 /* no chance to advance, out of here */
3592 if (!PR_SCTP_ENABLED(tp1->flags)) {
3594 * We can't fwd-tsn past any that are reliable aka
3595 * retransmitted until the asoc fails.
3600 (void)SCTP_GETTIME_TIMEVAL(&now);
3603 tp2 = TAILQ_NEXT(tp1, sctp_next);
3605 * now we got a chunk which is marked for another
3606 * retransmission to a PR-stream but has run out its chances
3607 * already, maybe, OR has been marked to skip now. Can we skip
3608 * it if it's a resend?
3610 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3611 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3613 * Now is this one marked for resend and its time is
3616 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3617 /* Yes so drop it */
3619 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3620 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3625 * No, we are done when we hit one marked for resend
3626 * whose time has not expired.
3632 * Ok, now if this chunk is marked to drop we can clean up
3633 * the chunk, advance our peer ack point, and we can check
3636 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3637 /* advance PeerAckPoint goes forward */
3638 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3641 * we don't want to de-queue it here. Just wait for
3642 * the next peer SACK to come with a new cumTSN and
3643 * then the chunk will be dropped in the normal
3647 sctp_free_bufspace(stcb, asoc, tp1, 1);
3649 * Maybe there should be another
3652 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3653 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3655 sctp_m_freem(tp1->data);
3657 if (stcb->sctp_socket) {
3658 sctp_sowwakeup(stcb->sctp_ep,
3660 #ifdef SCTP_WAKE_LOGGING
3661 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
3667 * If it is still in RESEND we can advance no
3673 * If we hit here we just dumped tp1, move to next tsn on
3681 #ifdef SCTP_HIGH_SPEED
3682 struct sctp_hs_raise_drop {
3685 int32_t drop_percent;
3688 #define SCTP_HS_TABLE_SIZE 73
3690 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3691 {38, 1, 50}, /* 0 */
3692 {118, 2, 44}, /* 1 */
3693 {221, 3, 41}, /* 2 */
3694 {347, 4, 38}, /* 3 */
3695 {495, 5, 37}, /* 4 */
3696 {663, 6, 35}, /* 5 */
3697 {851, 7, 34}, /* 6 */
3698 {1058, 8, 33}, /* 7 */
3699 {1284, 9, 32}, /* 8 */
3700 {1529, 10, 31}, /* 9 */
3701 {1793, 11, 30}, /* 10 */
3702 {2076, 12, 29}, /* 11 */
3703 {2378, 13, 28}, /* 12 */
3704 {2699, 14, 28}, /* 13 */
3705 {3039, 15, 27}, /* 14 */
3706 {3399, 16, 27}, /* 15 */
3707 {3778, 17, 26}, /* 16 */
3708 {4177, 18, 26}, /* 17 */
3709 {4596, 19, 25}, /* 18 */
3710 {5036, 20, 25}, /* 19 */
3711 {5497, 21, 24}, /* 20 */
3712 {5979, 22, 24}, /* 21 */
3713 {6483, 23, 23}, /* 22 */
3714 {7009, 24, 23}, /* 23 */
3715 {7558, 25, 22}, /* 24 */
3716 {8130, 26, 22}, /* 25 */
3717 {8726, 27, 22}, /* 26 */
3718 {9346, 28, 21}, /* 27 */
3719 {9991, 29, 21}, /* 28 */
3720 {10661, 30, 21}, /* 29 */
3721 {11358, 31, 20}, /* 30 */
3722 {12082, 32, 20}, /* 31 */
3723 {12834, 33, 20}, /* 32 */
3724 {13614, 34, 19}, /* 33 */
3725 {14424, 35, 19}, /* 34 */
3726 {15265, 36, 19}, /* 35 */
3727 {16137, 37, 19}, /* 36 */
3728 {17042, 38, 18}, /* 37 */
3729 {17981, 39, 18}, /* 38 */
3730 {18955, 40, 18}, /* 39 */
3731 {19965, 41, 17}, /* 40 */
3732 {21013, 42, 17}, /* 41 */
3733 {22101, 43, 17}, /* 42 */
3734 {23230, 44, 17}, /* 43 */
3735 {24402, 45, 16}, /* 44 */
3736 {25618, 46, 16}, /* 45 */
3737 {26881, 47, 16}, /* 46 */
3738 {28193, 48, 16}, /* 47 */
3739 {29557, 49, 15}, /* 48 */
3740 {30975, 50, 15}, /* 49 */
3741 {32450, 51, 15}, /* 50 */
3742 {33986, 52, 15}, /* 51 */
3743 {35586, 53, 14}, /* 52 */
3744 {37253, 54, 14}, /* 53 */
3745 {38992, 55, 14}, /* 54 */
3746 {40808, 56, 14}, /* 55 */
3747 {42707, 57, 13}, /* 56 */
3748 {44694, 58, 13}, /* 57 */
3749 {46776, 59, 13}, /* 58 */
3750 {48961, 60, 13}, /* 59 */
3751 {51258, 61, 13}, /* 60 */
3752 {53677, 62, 12}, /* 61 */
3753 {56230, 63, 12}, /* 62 */
3754 {58932, 64, 12}, /* 63 */
3755 {61799, 65, 12}, /* 64 */
3756 {64851, 66, 11}, /* 65 */
3757 {68113, 67, 11}, /* 66 */
3758 {71617, 68, 11}, /* 67 */
3759 {75401, 69, 10}, /* 68 */
3760 {79517, 70, 10}, /* 69 */
3761 {84035, 71, 10}, /* 70 */
3762 {89053, 72, 10}, /* 71 */
3763 {94717, 73, 9} /* 72 */
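/*
 * A worked example of how the table above drives sctp_hs_cwnd_increase()
 * below (numbers assumed for illustration): with cwnd = 512000 bytes,
 * cur_val = 500 KB; the first row whose threshold exceeds 500 is row 5,
 * {663, 6, 35}, so cwnd grows by 6 << 10 = 6144 bytes per ack round,
 * and on loss sctp_hs_cwnd_decrease() backs ssthresh/cwnd off by that
 * row's 35 percent rather than the standard 50 percent.
 */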
3767 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
3769 int cur_val, i, indx, incr;
3771 cur_val = net->cwnd >> 10;
3772 indx = SCTP_HS_TABLE_SIZE - 1;
3774 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3776 if (net->net_ack > net->mtu) {
3777 net->cwnd += net->mtu;
3778 #ifdef SCTP_CWND_MONITOR
3779 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3782 net->cwnd += net->net_ack;
3783 #ifdef SCTP_CWND_MONITOR
3784 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3788 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
3789 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3794 net->last_hs_used = indx;
3795 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3797 #ifdef SCTP_CWND_MONITOR
3798 sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
3804 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
3806 int cur_val, i, indx;
3808 #ifdef SCTP_CWND_MONITOR
3809 int old_cwnd = net->cwnd;
3813 cur_val = net->cwnd >> 10;
3814 indx = net->last_hs_used;
3815 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3817 net->ssthresh = net->cwnd / 2;
3818 if (net->ssthresh < (net->mtu * 2)) {
3819 net->ssthresh = 2 * net->mtu;
3821 net->cwnd = net->ssthresh;
3823 /* drop by the proper amount */
3824 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3825 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3826 net->cwnd = net->ssthresh;
3827 /* now where are we */
3828 indx = net->last_hs_used;
3829 cur_val = net->cwnd >> 10;
3830 /* reset where we are in the table */
3831 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3832 /* fell out of HS */
3833 net->last_hs_used = 0;
3835 for (i = indx; i >= 1; i--) {
3836 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3840 net->last_hs_used = indx;
3843 #ifdef SCTP_CWND_MONITOR
3844 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
3852 static __inline void
3853 sctp_cwnd_update(struct sctp_tcb *stcb,
3854 struct sctp_association *asoc,
3855 int accum_moved, int reneged_all, int will_exit)
3857 struct sctp_nets *net;
3859 /******************************/
3860 /* update cwnd and Early FR */
3861 /******************************/
3862 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3864 #ifdef JANA_CMT_FAST_RECOVERY
3866 * CMT fast recovery code. Need to debug.
3868 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
3869 if (compare_with_wrap(asoc->last_acked_seq,
3870 net->fast_recovery_tsn, MAX_TSN) ||
3871 (asoc->last_acked_seq == net->fast_recovery_tsn) ||
3872 compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
3873 (net->pseudo_cumack == net->fast_recovery_tsn)) {
3874 net->will_exit_fast_recovery = 1;
3878 if (sctp_early_fr) {
3880 * So, first of all, do we need to have an Early FR
3883 if (((TAILQ_FIRST(&asoc->sent_queue)) &&
3884 (net->ref_count > 1) &&
3885 (net->flight_size < net->cwnd)) ||
3888 * yes, so in this case stop it if it's
3889 * running, and then restart it. Reneging
3890 * all is a special case where we want to
3891 * run the Early FR timer and then force the
3892 * last few unacked to be sent, causing us
3893 * to elicit a SACK with gaps to force out
3896 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
3897 SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
3898 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
3899 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
3901 SCTP_STAT_INCR(sctps_earlyfrstrid);
3902 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
3904 /* No, stop it if it's running */
3905 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
3906 SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
3907 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
3908 SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
3912 /* if nothing was acked on this destination skip it */
3913 if (net->net_ack == 0) {
3914 #ifdef SCTP_CWND_LOGGING
3915 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
3919 if (net->net_ack2 > 0) {
3921 * Karn's rule applies to clearing error count, this
3924 net->error_count = 0;
3925 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
3926 SCTP_ADDR_NOT_REACHABLE) {
3927 /* addr came good */
3928 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3929 net->dest_state |= SCTP_ADDR_REACHABLE;
3930 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3931 SCTP_RECEIVED_SACK, (void *)net);
3932 /* now was it the primary? if so restore */
3933 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3934 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3938 #ifdef JANA_CMT_FAST_RECOVERY
3940 * CMT fast recovery code
3943 * if (sctp_cmt_on_off == 1 &&
3944 * net->fast_retran_loss_recovery &&
3945 * net->will_exit_fast_recovery == 0) { // @@@ Do something
3946 * } else if (sctp_cmt_on_off == 0 &&
3947 * asoc->fast_retran_loss_recovery && will_exit == 0) {
3951 if (asoc->fast_retran_loss_recovery && will_exit == 0 && sctp_cmt_on_off == 0) {
3953 * If we are in loss recovery we skip any cwnd
3956 goto skip_cwnd_update;
3959 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
3962 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
3963 /* If the cumulative ack moved we can proceed */
3964 if (net->cwnd <= net->ssthresh) {
3965 /* We are in slow start */
3966 if (net->flight_size + net->net_ack >=
3968 #ifdef SCTP_HIGH_SPEED
3969 sctp_hs_cwnd_increase(stcb, net);
3971 if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
3972 net->cwnd += (net->mtu * sctp_L2_abc_variable);
3973 #ifdef SCTP_CWND_MONITOR
3974 sctp_log_cwnd(stcb, net, net->mtu,
3975 SCTP_CWND_LOG_FROM_SS);
3979 net->cwnd += net->net_ack;
3980 #ifdef SCTP_CWND_MONITOR
3981 sctp_log_cwnd(stcb, net, net->net_ack,
3982 SCTP_CWND_LOG_FROM_SS);
3990 dif = net->cwnd - (net->flight_size +
3992 #ifdef SCTP_CWND_LOGGING
3993 sctp_log_cwnd(stcb, net, net->net_ack,
3994 SCTP_CWND_LOG_NOADV_SS);
3998 /* We are in congestion avoidance */
3999 if (net->flight_size + net->net_ack >=
4002 * add to pba only if we had a
4003 * cwnd's worth (or so) in flight OR
4004 * the burst limit was applied.
4006 net->partial_bytes_acked +=
4010 * Do we need to increase (if pba is
4013 if (net->partial_bytes_acked >=
4016 net->partial_bytes_acked) {
4017 net->partial_bytes_acked -=
4020 net->partial_bytes_acked =
4023 net->cwnd += net->mtu;
4024 #ifdef SCTP_CWND_MONITOR
4025 sctp_log_cwnd(stcb, net, net->mtu,
4026 SCTP_CWND_LOG_FROM_CA);
4029 #ifdef SCTP_CWND_LOGGING
4031 sctp_log_cwnd(stcb, net, net->net_ack,
4032 SCTP_CWND_LOG_NOADV_CA);
4038 #ifdef SCTP_CWND_LOGGING
4039 sctp_log_cwnd(stcb, net, net->net_ack,
4040 SCTP_CWND_LOG_NOADV_CA);
4042 dif = net->cwnd - (net->flight_size +
4047 #ifdef SCTP_CWND_LOGGING
4048 sctp_log_cwnd(stcb, net, net->mtu,
4049 SCTP_CWND_LOG_NO_CUMACK);
4054 * NOW, according to Karn's rule do we need to restore the
4055 * RTO timer back? Check our net_ack2. If not set then we
4056 * have an ambiguity, i.e., all data ack'd was sent to more
4059 if (net->net_ack2) {
4060 /* restore any doubled timers */
4061 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
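/*
 * This recomputes the RTO from the scaled smoothed RTT and variance
 * kept in lastsa/lastsv, undoing any exponential backoff applied at
 * retransmission time; the checks below then clamp the result to the
 * association's configured [minrto, maxrto] bounds.
 */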
4062 if (net->RTO < stcb->asoc.minrto) {
4063 net->RTO = stcb->asoc.minrto;
4065 if (net->RTO > stcb->asoc.maxrto) {
4066 net->RTO = stcb->asoc.maxrto;
4073 sctp_fs_audit(struct sctp_association *asoc)
4075 struct sctp_tmit_chunk *chk;
4076 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4078 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4079 if (chk->sent < SCTP_DATAGRAM_RESEND) {
4081 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4083 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4085 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
4092 if ((inflight > 0) || (inbetween > 0)) {
4094 panic("Flight size-express incorrect? \n");
4096 SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n",
4097 inflight, inbetween);
4104 sctp_window_probe_recovery(struct sctp_tcb *stcb,
4105 struct sctp_association *asoc,
4106 struct sctp_nets *net,
4107 struct sctp_tmit_chunk *tp1)
4109 struct sctp_tmit_chunk *chk;
4111 /* First setup this one and get it moved back */
4112 tp1->sent = SCTP_DATAGRAM_UNSENT;
4113 tp1->window_probe = 0;
4114 #ifdef SCTP_FLIGHT_LOGGING
4115 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4116 tp1->whoTo->flight_size,
4118 (uintptr_t) tp1->whoTo,
4119 tp1->rec.data.TSN_seq);
4121 sctp_flight_size_decrease(tp1);
4122 sctp_total_flight_decrease(stcb, tp1);
4123 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4124 TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
4125 asoc->sent_queue_cnt--;
4126 asoc->send_queue_cnt++;
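/*
 * The probe chunk is requeued as UNSENT at the head of the send queue
 * so it goes out again right away now that the peer's window has
 * reopened; any RESEND-marked chunks are moved in behind it below.
 */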
4128 * Now all guys marked for RESEND on the sent_queue must be moved
4131 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4132 if (chk->sent == SCTP_DATAGRAM_RESEND) {
4133 /* Another chunk to move */
4134 chk->sent = SCTP_DATAGRAM_UNSENT;
4135 chk->window_probe = 0;
4136 /* It should not be in flight */
4137 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
4138 TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next);
4139 asoc->sent_queue_cnt--;
4140 asoc->send_queue_cnt++;
4141 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4148 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4149 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4151 struct sctp_nets *net;
4152 struct sctp_association *asoc;
4153 struct sctp_tmit_chunk *tp1, *tp2;
4155 int win_probe_recovery = 0;
4156 int win_probe_recovered = 0;
4157 int j, done_once = 0;
4160 #ifdef SCTP_LOG_SACK_ARRIVALS
4161 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4162 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4164 SCTP_TCB_LOCK_ASSERT(stcb);
4166 old_rwnd = asoc->peers_rwnd;
4167 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4170 } else if (asoc->last_acked_seq == cumack) {
4171 /* Window update sack */
4172 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4173 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4174 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4175 /* SWS sender side engages */
4176 asoc->peers_rwnd = 0;
4178 if (asoc->peers_rwnd > old_rwnd) {
4184 /* First setup for CC stuff */
4185 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4186 net->prev_cwnd = net->cwnd;
4191 * CMT: Reset CUC and Fast recovery algo variables before
4194 net->new_pseudo_cumack = 0;
4195 net->will_exit_fast_recovery = 0;
4197 if (sctp_strict_sacks) {
4200 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4201 tp1 = TAILQ_LAST(&asoc->sent_queue,
4202 sctpchunk_listhead);
4203 send_s = tp1->rec.data.TSN_seq + 1;
4205 send_s = asoc->sending_seq;
4207 if ((cumack == send_s) ||
4208 compare_with_wrap(cumack, send_s, MAX_TSN)) {
4214 panic("Impossible sack 1");
4218 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4219 0, M_DONTWAIT, 1, MT_DATA);
4221 struct sctp_paramhdr *ph;
4224 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4226 ph = mtod(oper, struct sctp_paramhdr *);
4227 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4228 ph->param_length = htons(SCTP_BUF_LEN(oper));
4229 ippp = (uint32_t *) (ph + 1);
4230 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4232 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4233 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
4238 asoc->this_sack_highest_gap = cumack;
4239 stcb->asoc.overall_error_count = 0;
4240 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4241 /* process the new consecutive TSN first */
4242 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4244 tp2 = TAILQ_NEXT(tp1, sctp_next);
4245 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4247 cumack == tp1->rec.data.TSN_seq) {
4248 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4250 * ECN Nonce: Add the nonce to the
4251 * sender's nonce sum
4253 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4254 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4256 * If it is less than ACKED,
4257 * it is now no longer in
4258 * flight. Higher values may
4259 * occur during marking
4261 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4262 #ifdef SCTP_FLIGHT_LOGGING
4263 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4264 tp1->whoTo->flight_size,
4266 (uintptr_t) tp1->whoTo,
4267 tp1->rec.data.TSN_seq);
4270 sctp_flight_size_decrease(tp1);
4271 sctp_total_flight_decrease(stcb, tp1);
4273 tp1->whoTo->net_ack += tp1->send_size;
4274 if (tp1->snd_count < 2) {
4280 tp1->whoTo->net_ack2 +=
4283 /* update RTO too? */
4286 sctp_calculate_rto(stcb,
4288 &tp1->sent_rcv_time);
4293 * CMT: CUCv2 algorithm.
4294 * From the cumack'd TSNs,
4295 * for each TSN being acked
4296 * for the first time, set
4297 * the following variables
4300 * new_pseudo_cumack will
4301 * trigger a cwnd update.
4302 * find_(rtx_)pseudo_cumack
4303 * will trigger search for
4305 * (rtx-)pseudo-cumack.
4307 tp1->whoTo->new_pseudo_cumack = 1;
4308 tp1->whoTo->find_pseudo_cumack = 1;
4309 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4311 #ifdef SCTP_CWND_LOGGING
4312 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4315 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4316 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4318 if (tp1->rec.data.chunk_was_revoked) {
4319 /* deflate the cwnd */
4320 tp1->whoTo->cwnd -= tp1->book_size;
4321 tp1->rec.data.chunk_was_revoked = 0;
4323 tp1->sent = SCTP_DATAGRAM_ACKED;
4328 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4330 sctp_free_bufspace(stcb, asoc, tp1, 1);
4331 sctp_m_freem(tp1->data);
4333 #ifdef SCTP_SACK_LOGGING
4334 sctp_log_sack(asoc->last_acked_seq,
4336 tp1->rec.data.TSN_seq,
4339 SCTP_LOG_FREE_SENT);
4342 asoc->sent_queue_cnt--;
4343 sctp_free_remote_addr(tp1->whoTo);
4344 sctp_free_a_chunk(stcb, tp1);
4348 if (stcb->sctp_socket) {
4349 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4350 #ifdef SCTP_WAKE_LOGGING
4351 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4353 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4354 #ifdef SCTP_WAKE_LOGGING
4356 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4361 if (asoc->last_acked_seq != cumack)
4362 sctp_cwnd_update(stcb, asoc, 1, 0, 0);
4364 asoc->last_acked_seq = cumack;
4366 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4367 /* nothing left in-flight */
4368 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4369 net->flight_size = 0;
4370 net->partial_bytes_acked = 0;
4372 asoc->total_flight = 0;
4373 asoc->total_flight_count = 0;
4375 /* Fix up the a-p-a-p for future PR-SCTP sends */
4376 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4377 asoc->advanced_peer_ack_point = cumack;
4379 /* ECN Nonce updates */
4380 if (asoc->ecn_nonce_allowed) {
4381 if (asoc->nonce_sum_check) {
4382 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4383 if (asoc->nonce_wait_for_ecne == 0) {
4384 struct sctp_tmit_chunk *lchk;
4386 lchk = TAILQ_FIRST(&asoc->send_queue);
4387 asoc->nonce_wait_for_ecne = 1;
4389 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4391 asoc->nonce_wait_tsn = asoc->sending_seq;
4394 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4395 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4397 * Misbehaving peer. We need
4398 * to react to this guy
4400 asoc->ecn_allowed = 0;
4401 asoc->ecn_nonce_allowed = 0;
4406 /* See if Resynchronization Possible */
4407 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4408 asoc->nonce_sum_check = 1;
4410 * now we must calculate what the base is.
4411 * We do this based on two things, we know
4412 * the totals for all the segments
4413 * gap-acked in the SACK (none), and we also
4414 * know the SACK's nonce sum, it's in
4415 * nonce_sum_flag. So we can build a truth
4416 * table to back-calculate the new value of
4417 * asoc->nonce_sum_expect_base:
4419 * SACK-flag-Value Seg-Sums Base 0 0 0
4423 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
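
/*
 * Illustrative sketch (hypothetical helper, not in this file): how the
 * usable peer window above is derived. The advertised rwnd is reduced by
 * the bytes still in flight plus a per-chunk overhead allowance, and a
 * result below the silly-window threshold is clamped to zero so we never
 * dribble data into a tiny window.
 */
#if 0
static uint32_t
toy_peers_rwnd(uint32_t advertised, uint32_t flight, uint32_t queued_chunks,
    uint32_t chunk_oh, uint32_t sws_sender)
{
	uint32_t used = flight + (queued_chunks * chunk_oh);
	uint32_t rwnd = (advertised > used) ? (advertised - used) : 0;

	return ((rwnd < sws_sender) ? 0 : rwnd);
}
#endif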
	/* Now assure a timer where data is queued at */
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			net->window_probe = 0;
			win_probe_recovered = 1;
			/*
			 * Find first chunk that was used with window probe
			 * and clear the sent flag.
			 */
			/* sa_ignore FREED_MEMORY */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					/* move back to data send queue */
					sctp_window_probe_recovery(stcb, asoc, net, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			int to_ticks;

			if (net->RTO == 0) {
				to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
			} else {
				to_ticks = MSEC_TO_TICKS(net->RTO);
			}
			j++;
			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
			    sctp_timeout_handler, &net->rxt_timer);
		} else {
			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
			if (sctp_early_fr) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
				}
			}
		}
	}
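
/*
 * Illustrative sketch (assumes MSEC_TO_TICKS is essentially ms*hz/1000,
 * as in the FreeBSD macro): the RTO is kept in milliseconds and must be
 * converted to callout ticks before the retransmission timer is armed.
 */
#if 0
static int
toy_msec_to_ticks(int msec, int hz_value)
{
	/* round up so a short RTO never becomes a zero-tick timeout */
	return ((msec * hz_value + 999) / 1000);
}
#endif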
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen */
		sctp_fs_audit(asoc);
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		asoc->sent_queue_retran_cnt = 0;
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
				asoc->sent_queue_retran_cnt++;
			}
		}
		done_once = 1;
		goto again;
	}
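
/*
 * Illustrative sketch (toy types, hypothetical names): the audit path
 * above zeroes the flight accounting and rebuilds it from the sent
 * queue, exactly the recover-by-recount strategy of the loop it follows.
 */
#if 0
struct toy_chunk {
	int	 sent;		/* below the resend mark: still outstanding */
	uint32_t send_size;
};

static uint32_t
toy_recount_flight(const struct toy_chunk *q, int n, int resend_mark)
{
	uint32_t flight = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (q[i].sent < resend_mark)
			flight += q[i].send_size;
	}
	return (flight);
}
#endif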
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->locked_on_sending)
		    ) {
			struct sctp_stream_queue_pending *sp;

			/*
			 * I may be in a state where we got all across.. but
			 * cannot write more due to a shutdown... we abort
			 * since the user did not indicate EOR in this case.
			 * The sp will be cleaned during free of the asoc.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
			    sctp_streamhead);
			if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
				asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
				asoc->locked_on_sending = NULL;
				asoc->stream_queue_cnt--;
			}
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				/* Need to abort here */
				struct mbuf *oper;

		abort_out_now:
				*abort_now = 1;
				/* XXX */
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
					    sizeof(uint32_t);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
			} else {
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				goto abort_out_now;
			}
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
			sctp_send_shutdown_ack(stcb,
			    stcb->asoc.primary_destination);

			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
	}
#ifdef SCTP_SACK_RWND_LOGGING
	sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
	    rwnd,
	    stcb->asoc.peers_rwnd,
	    stcb->asoc.total_flight,
	    stcb->asoc.total_output_queue_size);
#endif
}
void
sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
    struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
{
	struct sctp_association *asoc;
	struct sctp_sack *sack;
	struct sctp_tmit_chunk *tp1, *tp2;
	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
	         this_sack_lowest_newack;
	uint32_t sav_cum_ack;
	uint16_t num_seg, num_dup;
	uint16_t wake_him = 0;
	unsigned int sack_length;
	uint32_t send_s = 0;
	int j = 0;
	int accum_moved = 0;
	int will_exit_fast_recovery = 0;
	uint32_t a_rwnd, old_rwnd;
	int win_probe_recovery = 0;
	int win_probe_recovered = 0;
	struct sctp_nets *net = NULL;
	int nonce_sum_flag, ecn_seg_sums = 0;
	int done_once = 0;
	uint8_t reneged_all = 0;
	uint8_t cmt_dac_flag;

	/*
	 * we take any chance we can to service our queues since we cannot
	 * get awoken when the socket is read from :<
	 */
	/*
	 * Now perform the actual SACK handling:
	 *  1) Verify that it is not an old sack; if so, discard.
	 *  2) If there is nothing left in the send queue (cum-ack is equal
	 *     to last acked) then you have a duplicate too; update any rwnd
	 *     change and verify no timers are running, then return.
	 *  3) Process any new consecutive data, i.e. cum-ack moved; process
	 *     these first and note that it moved.
	 *  4) Process any sack blocks.
	 *  5) Drop any acked chunks from the queue.
	 *  6) Check for any revoked blocks and mark them.
	 *  7) Update the cwnd.
	 *  8) Nothing left, sync up flightsizes and things, stop all timers
	 *     and also check for shutdown_pending state. If so then go
	 *     ahead and send off the shutdown. If in shutdown recv, send
	 *     off the shutdown-ack and start that timer, then return.
	 *  9) Strike any non-acked things and do FR procedure if needed,
	 *     being sure to set the FR flag.
	 * 10) Do pr-sctp procedures.
	 * 11) Apply any FR penalties.
	 * 12) Assure we will SACK if in shutdown_recv state.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	sack = &ch->sack;
	/* CMT DAC algo */
	this_sack_lowest_newack = 0;
	sack_length = (unsigned int)sack_len;
	/* ECN Nonce */
	SCTP_STAT_INCR(sctps_slowpath_sack);
	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
	num_seg = ntohs(sack->num_gap_ack_blks);
	a_rwnd = rwnd;

#ifdef SCTP_LOG_SACK_ARRIVALS
	sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
	    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
#endif
	/* CMT DAC algo */
	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
	num_dup = ntohs(sack->num_dup_tsns);

	old_rwnd = stcb->asoc.peers_rwnd;
	stcb->asoc.overall_error_count = 0;
	asoc = &stcb->asoc;
#ifdef SCTP_SACK_LOGGING
	sctp_log_sack(asoc->last_acked_seq,
	    cum_ack,
	    0,
	    num_seg,
	    num_dup,
	    SCTP_LOG_NEW_SACK);
#endif
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
	if (num_dup) {
		int off_to_dup, iii;
		uint32_t *dupdata;

		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
			dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup);
			for (iii = 0; iii < num_dup; iii++) {
				sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
				dupdata++;
			}
		} else {
			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
			    off_to_dup, num_dup, sack_length, num_seg);
		}
	}
#endif
	if (sctp_strict_sacks) {
		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
			tp1 = TAILQ_LAST(&asoc->sent_queue,
			    sctpchunk_listhead);
			send_s = tp1->rec.data.TSN_seq + 1;
		} else {
			send_s = asoc->sending_seq;
		}
		if (cum_ack == send_s ||
		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
#ifndef INVARIANTS
			struct mbuf *oper;

#endif
#ifdef INVARIANTS
	hopeless_peer:
			panic("Impossible sack 1");
#else
			/*
			 * no way, we have not even sent this TSN out yet.
			 * Peer is hopelessly messed up with us.
			 */
	hopeless_peer:
			*abort_now = 1;
			/* XXX */
			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
			}
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
			return;
#endif
		}
	}
	/**********************/
	/* 1) check the range */
	/**********************/
	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
		/* acking something behind */
		return;
	}
	sav_cum_ack = asoc->last_acked_seq;
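
/*
 * Illustrative sketch (standalone restatement, RFC 1982 style): the
 * compare_with_wrap() test used above treats TSNs as serial numbers, so
 * "a is newer than b" still holds after the 32-bit counter wraps.
 */
#if 0
static int
toy_tsn_newer(uint32_t a, uint32_t b)
{
	/* true when a logically follows b, even across a wrap past zero */
	return ((a != b) && ((uint32_t)(a - b) < 0x80000000U));
}
#endif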
	/* update the Rwnd of the peer */
	if (TAILQ_EMPTY(&asoc->sent_queue) &&
	    TAILQ_EMPTY(&asoc->send_queue) &&
	    (asoc->stream_queue_cnt == 0)
	    ) {
		/* nothing left on send/sent and strmq */
#ifdef SCTP_LOG_RWND
		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
		    asoc->peers_rwnd, 0, 0, a_rwnd);
#endif
		asoc->peers_rwnd = a_rwnd;
		if (asoc->sent_queue_retran_cnt) {
			asoc->sent_queue_retran_cnt = 0;
		}
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* stop any timers */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
			if (sctp_early_fr) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
				}
			}
			net->partial_bytes_acked = 0;
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		return;
	}
	/*
	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
	 * things. The total byte count acked is tracked in netAckSz AND
	 * netAck2 is used to track the total bytes acked that are
	 * unambiguous and were never retransmitted. We track these on a per
	 * destination address basis.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;

		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
	}
	/* process the new consecutive TSN first */
	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	while (tp1) {
		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
		    MAX_TSN) ||
		    last_tsn == tp1->rec.data.TSN_seq) {
			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
				/*
				 * ECN Nonce: Add the nonce to the sender's
				 * nonce sum
				 */
				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
				accum_moved = 1;
				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no-longer in flight. Higher
					 * values may occur during marking
					 */
					if ((tp1->whoTo->dest_state &
					    SCTP_ADDR_UNCONFIRMED) &&
					    (tp1->snd_count < 2)) {
						/*
						 * If there was no retran
						 * and the address is
						 * un-confirmed and we sent
						 * there and are now
						 * sacked.. its confirmed,
						 * mark it so.
						 */
						tp1->whoTo->dest_state &=
						    ~SCTP_ADDR_UNCONFIRMED;
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
#ifdef SCTP_FLIGHT_LOGGING
						sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
						    tp1->whoTo->flight_size,
						    tp1->book_size,
						    (uintptr_t) tp1->whoTo,
						    tp1->rec.data.TSN_seq);
#endif
						sctp_flight_size_decrease(tp1);
						sctp_total_flight_decrease(stcb, tp1);
					}
					tp1->whoTo->net_ack += tp1->send_size;

					/* CMT SFR and DAC algos */
					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
					tp1->whoTo->saw_newack = 1;

					if (tp1->snd_count < 2) {
						/*
						 * True non-retransmitted
						 * chunk
						 */
						tp1->whoTo->net_ack2 +=
						    tp1->send_size;

						/* update RTO too? */
						if (tp1->do_rtt) {
							tp1->whoTo->RTO =
							    sctp_calculate_rto(stcb,
							    asoc, tp1->whoTo,
							    &tp1->sent_rcv_time);
							tp1->do_rtt = 0;
						}
					}
					/*
					 * CMT: CUCv2 algorithm. From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresponding destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
					tp1->whoTo->new_pseudo_cumack = 1;
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;

#ifdef SCTP_SACK_LOGGING
					sctp_log_sack(asoc->last_acked_seq,
					    cum_ack,
					    tp1->rec.data.TSN_seq,
					    0,
					    0,
					    SCTP_LOG_TSN_ACKED);
#endif
#ifdef SCTP_CWND_LOGGING
					sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
#endif
				}
				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
					sctp_audit_log(0xB3,
					    (asoc->sent_queue_retran_cnt & 0x000000ff));
#endif
				}
				if (tp1->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					tp1->whoTo->cwnd -= tp1->book_size;
					tp1->rec.data.chunk_was_revoked = 0;
				}
				tp1->sent = SCTP_DATAGRAM_ACKED;
			}
		} else {
			break;
		}
		tp1 = TAILQ_NEXT(tp1, sctp_next);
	}
	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
	/* always set this up to cum-ack */
	asoc->this_sack_highest_gap = last_tsn;

	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
		num_seg = 0;
		/* skip corrupt segments */
		goto skip_segments;
	}
	if (num_seg > 0) {
		/*
		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
		 * to be greater than the cumack. Also reset saw_newack to 0
		 * for all dests.
		 */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->saw_newack = 0;
			net->this_sack_highest_newack = last_tsn;
		}

		/*
		 * thisSackHighestGap will increase while handling NEW
		 * segments. this_sack_highest_newack will increase while
		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
		 * used for CMT DAC algo. saw_newack will also change.
		 */
		sctp_handle_segments(stcb, asoc, ch, last_tsn,
		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
		    num_seg, &ecn_seg_sums);

		if (sctp_strict_sacks) {
			/*
			 * validate the biggest_tsn_acked in the gap acks if
			 * strict adherence is wanted.
			 */
			if ((biggest_tsn_acked == send_s) ||
			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
				/*
				 * peer is either confused or we are under
				 * attack. We must abort.
				 */
				goto hopeless_peer;
			}
		}
	}
skip_segments:
	/*******************************************/
	/* cancel ALL T3-send timer if accum moved */
	/*******************************************/
	if (sctp_cmt_on_off) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->new_pseudo_cumack)
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
		}
	} else {
		if (accum_moved) {
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
			}
		}
	}
	/********************************************/
	/* drop the acked chunks from the sendqueue */
	/********************************************/
	asoc->last_acked_seq = cum_ack;
	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	do {
		if (tp1 == NULL)
			break;
		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
		    MAX_TSN)) {
			break;
		}
		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* no more sent on list */
			break;
		}
		tp2 = TAILQ_NEXT(tp1, sctp_next);
		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
		/*
		 * Friendlier printf in lieu of panic now that I think it's
		 * harmless.
		 */
		if (tp1->pr_sctp_on) {
			if (asoc->pr_sctp_cnt != 0)
				asoc->pr_sctp_cnt--;
		}
		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
		    (asoc->total_flight > 0)) {
#ifdef INVARIANTS
			panic("Warning flight size is positive and should be 0");
#else
			SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
			    asoc->total_flight);
#endif
			asoc->total_flight = 0;
		}
		if (tp1->data) {
			sctp_free_bufspace(stcb, asoc, tp1, 1);
			sctp_m_freem(tp1->data);
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				asoc->sent_queue_cnt_removeable--;
			}
		}
		tp1->data = NULL;
#ifdef SCTP_SACK_LOGGING
		sctp_log_sack(asoc->last_acked_seq,
		    cum_ack,
		    tp1->rec.data.TSN_seq,
		    0,
		    0,
		    SCTP_LOG_FREE_SENT);
#endif
		asoc->sent_queue_cnt--;
		sctp_free_remote_addr(tp1->whoTo);
		sctp_free_a_chunk(stcb, tp1);
		wake_him++;
		tp1 = tp2;
	} while (tp1 != NULL);
	if ((wake_him) && (stcb->sctp_socket)) {
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
#ifdef SCTP_WAKE_LOGGING
		sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#ifdef SCTP_WAKE_LOGGING
	} else {
		sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
#endif
	}
	if (asoc->fast_retran_loss_recovery && accum_moved) {
		if (compare_with_wrap(asoc->last_acked_seq,
		    asoc->fast_recovery_tsn, MAX_TSN) ||
		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
			/* Setup so we will exit RFC2582 fast recovery */
			will_exit_fast_recovery = 1;
		}
	}
	/*
	 * Check for revoked fragments:
	 *
	 * if Previous sack - Had no frags then we can't have any revoked if
	 * Previous sack - Had frags then - If we now have frags aka
	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
	 * some of them. else - The peer revoked all ACKED fragments, since
	 * we had some before and now we have NONE.
	 */
	if (num_seg)
		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
	else if (asoc->saw_sack_with_frags) {
		int cnt_revoked = 0;

		tp1 = TAILQ_FIRST(&asoc->sent_queue);
		if (tp1 != NULL) {
			/* Peer revoked all dg's marked or acked */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
					tp1->sent = SCTP_DATAGRAM_SENT;
#ifdef SCTP_FLIGHT_LOGGING
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) tp1->whoTo,
					    tp1->rec.data.TSN_seq);
#endif
					sctp_flight_size_increase(tp1);
					sctp_total_flight_increase(stcb, tp1);
					tp1->rec.data.chunk_was_revoked = 1;
					/*
					 * To ensure that this increase in
					 * flightsize, which is artificial,
					 * does not throttle the sender, we
					 * also increase the cwnd
					 * artificially.
					 */
					tp1->whoTo->cwnd += tp1->book_size;
					cnt_revoked++;
				}
			}
			if (cnt_revoked) {
				reneged_all = 1;
			}
		}
		asoc->saw_sack_with_frags = 0;
	}
	if (num_seg)
		asoc->saw_sack_with_frags = 1;
	else
		asoc->saw_sack_with_frags = 0;

	sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
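
/*
 * Illustrative sketch (toy booleans): saw_sack_with_frags is a one-SACK
 * memory. A full revocation can only be inferred from a SACK carrying no
 * gap blocks that arrives right after a SACK that had some.
 */
#if 0
static int
toy_revoked_all(int prev_sack_had_frags, int this_sack_num_seg)
{
	return (prev_sack_had_frags && (this_sack_num_seg == 0));
}
#endif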
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			/* stop all timers */
			if (sctp_early_fr) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
				}
			}
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
#ifdef SCTP_LOG_RWND
		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
		    asoc->peers_rwnd, 0, 0, a_rwnd);
#endif
		asoc->peers_rwnd = a_rwnd;
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->locked_on_sending)
		    ) {
			struct sctp_stream_queue_pending *sp;

			/*
			 * I may be in a state where we got all across.. but
			 * cannot write more due to a shutdown... we abort
			 * since the user did not indicate EOR in this case.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
			    sctp_streamhead);
			if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
				asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
				asoc->locked_on_sending = NULL;
				asoc->stream_queue_cnt--;
			}
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				/* Need to abort here */
				struct mbuf *oper;

		abort_out_now:
				*abort_now = 1;
				/* XXX */
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
					    sizeof(uint32_t);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
				return;
			} else {
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
			return;
		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				goto abort_out_now;
			}
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
			sctp_send_shutdown_ack(stcb,
			    stcb->asoc.primary_destination);

			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
			return;
		}
	}
	/*
	 * Now here we are going to recycle net_ack for a different use...
	 * HEADS UP.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->net_ack = 0;
	}

	/*
	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
	 * automatically ensure that.
	 */
	if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) {
		this_sack_lowest_newack = cum_ack;
	}

	sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
	    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
	/*********************************************/
	/* Here we perform PR-SCTP procedures        */
	/*********************************************/
	/* C1. update advancedPeerAckPoint */
	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
		asoc->advanced_peer_ack_point = cum_ack;
	}
	/* C2. try to further move advancedPeerAckPoint ahead */

	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
		    MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE
			 * stuff is put into SCTP for cross checking.
			 */
			send_forward_tsn(stcb, asoc);

			/*
			 * ECN Nonce: Disable Nonce Sum check when FWD TSN
			 * is sent and store resync tsn
			 */
			asoc->nonce_sum_check = 0;
			asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
		}
		if (lchk) {
			/* Assure a timer is up */
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			    stcb->sctp_ep, stcb, lchk->whoTo);
		}
	}
	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;

#ifdef SCTP_HIGH_SPEED
				sctp_hs_cwnd_decrease(stcb, net);
#else
#ifdef SCTP_CWND_MONITOR
				int old_cwnd = net->cwnd;

#endif
				net->ssthresh = net->cwnd / 2;
				if (net->ssthresh < (net->mtu * 2)) {
					net->ssthresh = 2 * net->mtu;
				}
				net->cwnd = net->ssthresh;
#ifdef SCTP_CWND_MONITOR
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
				    SCTP_CWND_LOG_FROM_FR);
#endif
#endif
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * Disable Nonce Sum Checking and store the
				 * resync tsn
				 */
				asoc->nonce_sum_check = 0;
				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
	/******************************************************************
	 * Here we do the stuff with ECN Nonce checking.
	 * We basically check to see if the nonce sum flag was incorrect
	 * or if resynchronization needs to be done. Also if we catch a
	 * misbehaving receiver we give him the kick.
	 ******************************************************************/
	if (asoc->ecn_nonce_allowed) {
		if (asoc->nonce_sum_check) {
			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
				if (asoc->nonce_wait_for_ecne == 0) {
					struct sctp_tmit_chunk *lchk;

					lchk = TAILQ_FIRST(&asoc->send_queue);
					asoc->nonce_wait_for_ecne = 1;
					if (lchk) {
						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
					} else {
						asoc->nonce_wait_tsn = asoc->sending_seq;
					}
				} else {
					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
						/*
						 * Misbehaving peer. We need
						 * to react to this guy.
						 */
						asoc->ecn_allowed = 0;
						asoc->ecn_nonce_allowed = 0;
					}
				}
			}
		} else {
			/* See if Resynchronization Possible */
			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
				asoc->nonce_sum_check = 1;
				/*
				 * now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments
				 * gap-acked in the SACK, stored in
				 * ecn_seg_sums. We also know the SACK's
				 * nonce sum, it's in nonce_sum_flag. So we
				 * can build a truth table to
				 * back-calculate the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value    Seg-Sums    Base
				 *        0              0          0
				 *        1              0          1
				 *        0              1          1
				 *        1              1          0
				 */
				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
			}
		}
	}
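
/*
 * Illustrative sketch (hypothetical helper): the truth table above is
 * plain XOR. Given the nonce sum reported in the SACK and the sum over
 * the gap-acked segments, one exclusive-or masked to the nonce bit
 * recovers the expected base.
 */
#if 0
static int
toy_nonce_base(int sack_nonce_sum, int seg_sums)
{
	/* 0x01 stands in for SCTP_SACK_NONCE_SUM */
	return ((seg_sums ^ sack_nonce_sum) & 0x01);
}
#endif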
	/* Now are we exiting loss recovery ? */
	if (will_exit_fast_recovery) {
		/* Ok, we must exit fast recovery */
		asoc->fast_retran_loss_recovery = 0;
	}
	if ((asoc->sat_t3_loss_recovery) &&
	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
	    MAX_TSN) ||
	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
		/* end satellite t3 loss recovery */
		asoc->sat_t3_loss_recovery = 0;
	}
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (net->will_exit_fast_recovery) {
			/* Ok, we must exit fast recovery */
			net->fast_retran_loss_recovery = 0;
		}
	}
	/* Adjust and set the new rwnd value */
#ifdef SCTP_LOG_RWND
	sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
	    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
#endif

	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
	/*
	 * Now we must setup so we have a timer up for anyone with
	 * outstanding data.
	 */
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			net->window_probe = 0;
			win_probe_recovered = 1;
			/*
			 * Find first chunk that was used with
			 * window probe and clear the event. Put
			 * it back into the send queue as if it has
			 * not been sent.
			 */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					sctp_window_probe_recovery(stcb, asoc, net, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			j++;
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			    stcb->sctp_ep, stcb, net);
		} else {
			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
			if (sctp_early_fr) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
				}
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen */
		sctp_fs_audit(asoc);
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		asoc->sent_queue_retran_cnt = 0;
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
				asoc->sent_queue_retran_cnt++;
			}
		}
		done_once = 1;
		goto again;
	}
#ifdef SCTP_SACK_RWND_LOGGING
	sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
	    a_rwnd,
	    stcb->asoc.peers_rwnd,
	    stcb->asoc.total_flight,
	    stcb->asoc.total_output_queue_size);
#endif
}
void
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
    struct sctp_nets *netp, int *abort_flag)
{
	uint32_t cum_ack, a_rwnd;

	cum_ack = ntohl(cp->cumulative_tsn_ack);
	/* Arrange so a_rwnd does NOT change */
	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;

	/* Now call the express sack handling */
	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
}
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *ctl, *nctl;
	struct sctp_association *asoc;
	uint16_t tt;

	asoc = &stcb->asoc;
	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream no that
	 * came in.
	 */
	ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
		    (tt == ctl->sinfo_ssn)) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1);
			}
		} else {
			/* no more delivery now. */
			break;
		}
		ctl = nctl;
	}
	/*
	 * now we must deliver things in queue the normal way if any are
	 * now ready.
	 */
	tt = strmin->last_sequence_delivered + 1;
	ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
		if (tt == ctl->sinfo_ssn) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			strmin->last_sequence_delivered = ctl->sinfo_ssn;
			if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1);
			}
			tt = strmin->last_sequence_delivered + 1;
		} else {
			break;
		}
		ctl = nctl;
	}
}
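
/*
 * Illustrative sketch (toy model): both loops above are the classic
 * reorder-queue pattern - first flush everything at or below the new
 * cutoff, then keep delivering while the queue head is exactly the next
 * expected stream sequence number.
 */
#if 0
static size_t
toy_deliver_in_order(const uint16_t *sorted_ssns, size_t n, uint16_t *next_ssn)
{
	size_t delivered = 0;

	while (delivered < n && sorted_ssns[delivered] == *next_ssn) {
		(*next_ssn)++;		/* 16-bit SSN wraps naturally */
		delivered++;
	}
	return (delivered);
}
#endif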
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
{
	/*
	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
	 * forward TSN, when the SACK comes back that acknowledges the
	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
	 * get quite tricky since we may have sent more data intervening
	 * and must carefully account for what the SACK says on the nonce
	 * and any gaps that are reported. This work will NOT be done here,
	 * but I note it here since it is really related to PR-SCTP and
	 * ECN-nonce.
	 */

	/* The pr-sctp fwd tsn */
	/*
	 * here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_strseq *stseq;
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap, back_out_htsn;
	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *at;

	cumack_set_flag = 0;
	asoc = &stcb->asoc;
	cnt_gone = 0;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == new_cum_tsn) {
		/* Already got there ... */
		return;
	}
	back_out_htsn = asoc->highest_tsn_inside_map;
	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
	    MAX_TSN)) {
		asoc->highest_tsn_inside_map = new_cum_tsn;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
	    MAX_TSN)) ||
	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
	} else {
		/* try to prevent underflow here */
		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
	}
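
/*
 * Illustrative sketch with a worked example (values hypothetical): with
 * mapping_array_base_tsn = 0xfffffff0 and new_cum_tsn = 0x0000000f, the
 * else-branch above gives gap = 0x0f + (0xffffffff - 0xfffffff0) + 1 =
 * 0x1f, i.e. 31 TSNs past the base, exactly as if no wrap had occurred.
 */
#if 0
static uint32_t
toy_tsn_gap(uint32_t new_cum_tsn, uint32_t base_tsn)
{
	if (new_cum_tsn >= base_tsn)
		return (new_cum_tsn - base_tsn);
	/* wrapped: distance up to MAX_TSN, then from 0 to new_cum_tsn */
	return (new_cum_tsn + (0xffffffffU - base_tsn) + 1);
}
#endif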
	if (gap >= m_size) {
		asoc->highest_tsn_inside_map = back_out_htsn;
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *oper;

			/*
			 * out of range (of single byte chunks in the rwnd I
			 * give out). This must be an attacker.
			 */
			*abort_flag = 1;
			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    (sizeof(uint32_t) * 3);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
				ippp++;
				*ippp = asoc->highest_tsn_inside_map;
				ippp++;
				*ippp = new_cum_tsn;
			}
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb,
			    SCTP_PEER_FAULTY, oper);
			return;
		}
		if (asoc->highest_tsn_inside_map >
		    asoc->mapping_array_base_tsn) {
			gap = asoc->highest_tsn_inside_map -
			    asoc->mapping_array_base_tsn;
		} else {
			gap = asoc->highest_tsn_inside_map +
			    (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
		}
		cumack_set_flag = 1;
	}
	for (i = 0; i <= gap; i++) {
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
	}
	/*
	 * Now after marking all, slide thing forward but no sack please.
	 */
	sctp_sack_check(stcb, 0, 0, abort_flag);
	if (*abort_flag)
		return;
	if (cumack_set_flag) {
		/*
		 * fwd-tsn went outside my gap array - not a common
		 * occurrence. Do the same thing we do when a cookie-echo
		 * resets a map.
		 */
		asoc->highest_tsn_inside_map = new_cum_tsn - 1;
		asoc->mapping_array_base_tsn = new_cum_tsn;
		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/*
	 * First service it if pd-api is up, just in case we can progress it
	 * forward.
	 */
	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* For each one on here see if we need to toss it */
		/*
		 * For now large messages held on the reasmqueue that are
		 * complete will be tossed too. We could in theory do more
		 * work to spin through and stop after dumping one msg aka
		 * seeing the start of a new msg at the head, and call the
		 * delivery function... to see if it can be delivered... But
		 * for now we just dump everything on the queue.
		 */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			at = TAILQ_NEXT(chk, sctp_next);
			if (compare_with_wrap(asoc->cumulative_tsn,
			    chk->rec.data.TSN_seq, MAX_TSN) ||
			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
				/* It needs to be tossed */
				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
				if (compare_with_wrap(chk->rec.data.TSN_seq,
				    asoc->tsn_last_delivered, MAX_TSN)) {
					asoc->tsn_last_delivered =
					    chk->rec.data.TSN_seq;
					asoc->str_of_pdapi =
					    chk->rec.data.stream_number;
					asoc->ssn_of_pdapi =
					    chk->rec.data.stream_seq;
					asoc->fragment_flags =
					    chk->rec.data.rcv_flags;
				}
				asoc->size_on_reasm_queue -= chk->send_size;
				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
				cnt_gone++;
				/* Clear up any stream problem */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    (compare_with_wrap(chk->rec.data.stream_seq,
				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
				    MAX_SEQ))) {
					/*
					 * We must dump forward this stream's
					 * sequence number if the chunk is
					 * not unordered that is being
					 * skipped. There is a chance that
					 * if the peer does not include the
					 * last fragment in its FWD-TSN we
					 * WILL have a problem here since
					 * you would have a partial chunk in
					 * queue that may not be
					 * deliverable. Also if a Partial
					 * delivery API has started, the
					 * user may get a partial chunk with
					 * the next read returning a new
					 * chunk... really ugly, but I see
					 * no way around it! Maybe a
					 * notify??
					 */
					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
					    chk->rec.data.stream_seq;
				}
				if (chk->data) {
					sctp_m_freem(chk->data);
					chk->data = NULL;
				}
				sctp_free_remote_addr(chk->whoTo);
				sctp_free_a_chunk(stcb, chk);
			} else {
				/*
				 * Ok we have gone beyond the end of the
				 * fwd-tsn's mark. Some checks...
				 */
				if ((asoc->fragmented_delivery_inprogress) &&
				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
					uint32_t str_seq;

					/*
					 * Special case PD-API is up and
					 * what we fwd-tsn' over includes
					 * one that had the LAST_FRAG. We no
					 * longer need to do the PD-API.
					 */
					asoc->fragmented_delivery_inprogress = 0;

					str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq);
				}
				break;
			}
			chk = at;
		}
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * Ok we removed cnt_gone chunks in the PD-API queue that
		 * were being delivered. So now we must turn off the flag.
		 */
		uint32_t str_seq;

		str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq);
		asoc->fragmented_delivery_inprogress = 0;
	}
	/*************************************************************/
	/* 3. Update the PR-stream re-ordering queues                */
	/*************************************************************/
	stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
	fwd_sz -= sizeof(*fwd);
	{
		unsigned int num_str;

		num_str = fwd_sz / sizeof(struct sctp_strseq);
		for (i = 0; i < num_str; i++) {
			uint16_t st;
			unsigned char *xx;

			/* Convert */
			xx = (unsigned char *)&stseq[i];
			st = ntohs(stseq[i].stream);
			stseq[i].stream = st;
			st = ntohs(stseq[i].sequence);
			stseq[i].sequence = st;
			/* now process */
			if (stseq[i].stream > asoc->streamincnt) {
				/*
				 * It is arguable if we should continue.
				 * Since the peer sent bogus stream info we
				 * may be in deep trouble.. a return may be
				 * a better choice?
				 */
				continue;
			}
			strm = &asoc->strmin[stseq[i].stream];
			if (compare_with_wrap(stseq[i].sequence,
			    strm->last_sequence_delivered, MAX_SEQ)) {
				/* Update the sequence number */
				strm->last_sequence_delivered =
				    stseq[i].sequence;
			}
			/* now kick the stream the new way */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
	}
	if (TAILQ_FIRST(&asoc->reasmqueue)) {
		/* now lets kick out and check for more fragmented delivery */
		sctp_deliver_reasm_check(stcb, &stcb->asoc);
	}
}