2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * sctp_set_rwnd() — recompute and store asoc->my_rwnd, the receive window
 * this endpoint advertises to its peer: socket receive-buffer space minus
 * data still held on the reassembly and per-stream queues, minus the
 * accumulated rwnd control overhead (my_rwnd_control_len).
 *
 * NOTE(review): this extract is damaged — interior lines (return
 * statements, braces, else arms) were dropped and each surviving line
 * carries a stray leading line number from extraction.  Comments below
 * describe only what the visible lines show; verify against the pristine
 * sctp_indata.c before relying on them.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 uint32_t calc, calc_save;
65 * This is really set wrong with respect to a 1-2-m socket. Since
66 * the sb_cc is the count that everyone as put up. When we re-write
67 * sctp_soreceive then we will fix this so that ONLY this
68 * associations data is taken into account.
/* No socket attached (association being torn down?) — presumably bails out early; the return line was dropped. TODO confirm. */
70 if (stcb->sctp_socket == NULL)
/* Nothing buffered anywhere: grant the full receive-buffer limit. */
73 if (stcb->asoc.sb_cc == 0 &&
74 asoc->size_on_reasm_queue == 0 &&
75 asoc->size_on_all_streams == 0) {
76 /* Full rwnd granted */
77 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
81 /* get actual space */
82 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
85 * take out what has NOT been put on socket queue and we yet hold
/* Subtract data we hold but have not yet handed to the socket buffer. */
88 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
89 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
96 /* what is the overhead of all these rwnd's */
98 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
101 asoc->my_rwnd = calc;
102 if ((asoc->my_rwnd == 0) &&
103 (calc < stcb->asoc.my_rwnd_control_len)) {
105 * If our rwnd == 0 && the overhead is greater than the
106 * data onqueue, we clamp the rwnd to 1. This lets us
107 * still accept inbound segments, but hopefully will shut
108 * the sender down when he finally gets the message. This
109 * hopefully will gracefully avoid discarding packets.
/* Silly-window-syndrome avoidance: below the SWS threshold, advertise nothing rather than a uselessly tiny window. */
114 (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
115 /* SWS engaged, tell peer none left */
120 /* Calculate what the rwnd would be */
/*
 * sctp_calc_rwnd() — same computation as sctp_set_rwnd() but side-effect
 * free with respect to asoc->my_rwnd: it only *calculates* what the
 * receive window would be (presumably returning `result`; the return
 * lines were dropped from this extract — TODO confirm).
 *
 * NOTE(review): extract is damaged (dropped lines, stray leading line
 * numbers); see the caveat on sctp_set_rwnd().
 */
122 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
124 uint32_t calc = 0, calc_save = 0, result = 0;
127 * This is really set wrong with respect to a 1-2-m socket. Since
128 * the sb_cc is the count that everyone as put up. When we re-write
129 * sctp_soreceive then we will fix this so that ONLY this
130 * associations data is taken into account.
/* No socket attached — presumably returns early (dropped line). */
132 if (stcb->sctp_socket == NULL)
135 if (stcb->asoc.sb_cc == 0 &&
136 asoc->size_on_reasm_queue == 0 &&
137 asoc->size_on_all_streams == 0) {
138 /* Full rwnd granted */
139 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
143 /* get actual space */
144 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
147 * take out what has NOT been put on socket queue and we yet hold
/* Subtract data held on the reassembly and per-stream inbound queues. */
150 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
151 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
157 /* what is the overhead of all these rwnd's */
158 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
163 (calc < stcb->asoc.my_rwnd_control_len)) {
165 * If our rwnd == 0 && the overhead is greater than the
166 * data onqueue, we clamp the rwnd to 1. This lets us
167 * still accept inbound segments, but hopefully will shut
168 * the sender down when he finally gets the message. This
169 * hopefully will gracefully avoid discarding packets.
/* SWS avoidance, mirroring sctp_set_rwnd(). */
174 (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
175 /* SWS engaged, tell peer none left */
184 * Build out our readq entry based on the incoming packet.
/*
 * sctp_build_readq_entry() — allocate a sctp_queued_to_read entry and
 * populate it from the supplied per-chunk fields (tsn/ppid/stream/ssn/
 * flags) for delivery to the socket read queue.  Takes a reference on
 * `net` (stored as whoFrom).  Presumably returns NULL on allocation
 * failure — the return line inside the NULL check was dropped from this
 * extract, as was the trailing mbuf parameter (`dm` is assigned below
 * but its declaration line is missing).  TODO confirm against original.
 */
186 struct sctp_queued_to_read *
187 sctp_build_readq_entry(struct sctp_tcb *stcb,
188 struct sctp_nets *net,
189 uint32_t tsn, uint32_t ppid,
190 uint32_t context, uint16_t stream_no,
191 uint16_t stream_seq, uint8_t flags,
194 struct sctp_queued_to_read *read_queue_e = NULL;
196 sctp_alloc_a_readq(stcb, read_queue_e);
197 if (read_queue_e == NULL) {
/* sinfo_* fields mirror struct sctp_sndrcvinfo as seen by the receiver. */
200 read_queue_e->sinfo_stream = stream_no;
201 read_queue_e->sinfo_ssn = stream_seq;
/* Chunk flags are carried in the upper byte of sinfo_flags. */
202 read_queue_e->sinfo_flags = (flags << 8);
203 read_queue_e->sinfo_ppid = ppid;
204 read_queue_e->sinfo_context = stcb->asoc.context;
205 read_queue_e->sinfo_timetolive = 0;
206 read_queue_e->sinfo_tsn = tsn;
207 read_queue_e->sinfo_cumtsn = tsn;
208 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
209 read_queue_e->whoFrom = net;
210 read_queue_e->length = 0;
/* Hold the net while this entry references it. */
211 atomic_add_int(&net->ref_count, 1);
212 read_queue_e->data = dm;
213 read_queue_e->spec_flags = 0;
214 read_queue_e->tail_mbuf = NULL;
215 read_queue_e->aux_data = NULL;
216 read_queue_e->stcb = stcb;
217 read_queue_e->port_from = stcb->rport;
218 read_queue_e->do_not_ref_stcb = 0;
219 read_queue_e->end_added = 0;
220 read_queue_e->some_taken = 0;
221 read_queue_e->pdapi_aborted = 0;
223 return (read_queue_e);
228 * Build out our readq entry based on the incoming packet.
/*
 * sctp_build_readq_entry_chk() — like sctp_build_readq_entry(), but all
 * fields come from an existing transmit-chunk record (`chk`) on the
 * reassembly path.  Takes a reference on chk->whoTo.  Presumably returns
 * NULL on allocation failure (the return line inside the NULL check was
 * dropped from this extract — TODO confirm).
 */
230 static struct sctp_queued_to_read *
231 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
232 struct sctp_tmit_chunk *chk)
234 struct sctp_queued_to_read *read_queue_e = NULL;
236 sctp_alloc_a_readq(stcb, read_queue_e);
237 if (read_queue_e == NULL) {
240 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
241 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
/* Chunk receive flags live in the upper byte of sinfo_flags. */
242 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
243 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
244 read_queue_e->sinfo_context = stcb->asoc.context;
245 read_queue_e->sinfo_timetolive = 0;
246 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
247 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
248 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
249 read_queue_e->whoFrom = chk->whoTo;
250 read_queue_e->aux_data = NULL;
251 read_queue_e->length = 0;
/* Hold the destination net while this entry references it. */
252 atomic_add_int(&chk->whoTo->ref_count, 1);
/* Ownership of the chunk's mbuf chain transfers to the readq entry. */
253 read_queue_e->data = chk->data;
254 read_queue_e->tail_mbuf = NULL;
255 read_queue_e->stcb = stcb;
256 read_queue_e->port_from = stcb->rport;
257 read_queue_e->spec_flags = 0;
258 read_queue_e->do_not_ref_stcb = 0;
259 read_queue_e->end_added = 0;
260 read_queue_e->some_taken = 0;
261 read_queue_e->pdapi_aborted = 0;
263 return (read_queue_e);
/*
 * sctp_build_ctl_nchunk() — build an mbuf holding a cmsghdr plus a
 * sctp_sndrcvinfo (or sctp_extrcvinfo when the EXT_RCVINFO socket
 * feature is on), for ancillary-data delivery to the application.
 * Presumably returns NULL when the RECVDATAIOEVNT feature is off or the
 * mbuf allocation fails (those return lines were dropped from this
 * extract — TODO confirm).  NOTE(review): `cmh->cmsg_len` appears to be
 * set on a dropped line; only its use at SCTP_BUF_LEN is visible here.
 */
268 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
269 struct sctp_sndrcvinfo *sinfo)
271 struct sctp_sndrcvinfo *outinfo;
275 int use_extended = 0;
277 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
278 /* user does not want the sndrcv ctl */
281 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
283 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
285 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
289 ret = sctp_get_mbuf_for_msg(len,
290 0, M_DONTWAIT, 1, MT_DATA);
296 /* We need a CMSG header followed by the struct */
297 cmh = mtod(ret, struct cmsghdr *);
298 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
299 cmh->cmsg_level = IPPROTO_SCTP;
301 cmh->cmsg_type = SCTP_EXTRCV;
303 memcpy(outinfo, sinfo, len);
305 cmh->cmsg_type = SCTP_SNDRCV;
309 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
/*
 * sctp_build_ctl_cchunk() — sibling of sctp_build_ctl_nchunk() that
 * builds the cmsghdr + sndrcvinfo into a SCTP_MALLOC'd flat buffer
 * rather than an mbuf.  Caller presumably owns/frees the returned
 * buffer (return lines were dropped from this extract — TODO confirm).
 */
315 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
317 struct sctp_sndrcvinfo *sinfo)
319 struct sctp_sndrcvinfo *outinfo;
323 int use_extended = 0;
325 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
326 /* user does not want the sndrcv ctl */
329 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
331 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
333 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
/* Flat heap buffer instead of an mbuf — the only difference from the nchunk variant. */
335 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
340 /* We need a CMSG header followed by the struct */
341 cmh = (struct cmsghdr *)buf;
342 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
343 cmh->cmsg_level = IPPROTO_SCTP;
345 cmh->cmsg_type = SCTP_EXTRCV;
347 memcpy(outinfo, sinfo, len);
349 cmh->cmsg_type = SCTP_SNDRCV;
359 * We are delivering currently from the reassembly queue. We must continue to
360 * deliver until we either: 1) run out of space. 2) run out of sequential
361 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * sctp_service_reassembly() — deliver fragments from asoc->reasmqueue to
 * the socket read queue, in TSN order, until we run out of sequential
 * TSNs, hit a LAST_FRAG, or run out of space (see the file comment
 * above).  Drives the partial-delivery API state via
 * asoc->control_pdapi / fragmented_delivery_inprogress.
 *
 * NOTE(review): this extract is damaged — loop headers, braces and
 * `continue`/`return` lines were dropped, so the control flow shown here
 * is not complete.  Comments describe only the visible statements.
 */
364 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
366 struct sctp_tmit_chunk *chk;
371 struct sctp_queued_to_read *control, *ctl, *ctlat;
376 cntDel = stream_no = 0;
/* Socket is gone: drain and free the entire reassembly queue, nothing to deliver to. */
377 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
378 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 /* socket above is long gone */
380 asoc->fragmented_delivery_inprogress = 0;
381 chk = TAILQ_FIRST(&asoc->reasmqueue);
383 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 asoc->size_on_reasm_queue -= chk->send_size;
385 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
387 * Lose the data pointer, since its in the socket
391 sctp_m_freem(chk->data);
394 /* Now free the address and data */
395 sctp_free_a_chunk(stcb, chk);
396 /* sa_ignore FREED_MEMORY */
397 chk = TAILQ_FIRST(&asoc->reasmqueue);
401 SCTP_TCB_LOCK_ASSERT(stcb);
403 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Delivery must be strictly sequential by TSN. */
407 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
408 /* Can't deliver more :< */
411 stream_no = chk->rec.data.stream_number;
412 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
/* Ordered data must also be the next SSN for its stream. */
413 if (nxt_todel != chk->rec.data.stream_seq &&
414 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
416 * Not the next sequence to deliver in its stream OR
/* FIRST fragment: start a new read-queue entry and remember it for the rest of the message. */
421 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
423 control = sctp_build_readq_entry_chk(stcb, chk);
424 if (control == NULL) {
428 /* save it off for our future deliveries */
429 stcb->asoc.control_pdapi = control;
430 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
434 sctp_add_to_readq(stcb->sctp_ep,
435 stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment: append its data to the in-progress readq entry. */
438 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
442 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
443 stcb->asoc.control_pdapi,
444 chk->data, end, chk->rec.data.TSN_seq,
445 &stcb->sctp_socket->so_rcv)) {
447 * something is very wrong, either
448 * control_pdapi is NULL, or the tail_mbuf
449 * is corrupt, or there is a EOM already on
452 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
453 panic("This should not happen control_pdapi NULL?");
455 /* if we did not panic, it was a EOM */
456 panic("Bad chunking ??");
461 /* pull it we did it */
462 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
463 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* Message complete: leave PD-API mode and advance the stream's SSN for ordered data. */
464 asoc->fragmented_delivery_inprogress = 0;
465 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
466 asoc->strmin[stream_no].last_sequence_delivered++;
468 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
469 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
471 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
473 * turn the flag back on since we just delivered
476 asoc->fragmented_delivery_inprogress = 1;
/* Record what was last handed up, for later PD-API accounting/abort. */
478 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
479 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
480 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
481 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
483 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
484 asoc->size_on_reasm_queue -= chk->send_size;
485 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
486 /* free up the chk */
488 sctp_free_a_chunk(stcb, chk);
/* Fragmented delivery done: drain any now-deliverable ordered data queued on the same stream. */
490 if (asoc->fragmented_delivery_inprogress == 0) {
492 * Now lets see if we can deliver the next one on
495 struct sctp_stream_in *strm;
497 strm = &asoc->strmin[stream_no];
498 nxt_todel = strm->last_sequence_delivered + 1;
499 ctl = TAILQ_FIRST(&strm->inqueue);
500 if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
501 while (ctl != NULL) {
502 /* Deliver more if we can. */
503 if (nxt_todel == ctl->sinfo_ssn) {
504 ctlat = TAILQ_NEXT(ctl, next);
505 TAILQ_REMOVE(&strm->inqueue, ctl, next);
506 asoc->size_on_all_streams -= ctl->length;
507 sctp_ucount_decr(asoc->cnt_on_all_streams);
508 strm->last_sequence_delivered++;
509 sctp_add_to_readq(stcb->sctp_ep, stcb,
511 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
516 nxt_todel = strm->last_sequence_delivered + 1;
521 /* sa_ignore FREED_MEMORY */
522 chk = TAILQ_FIRST(&asoc->reasmqueue);
527 * Queue the chunk either right into the socket buffer if it is the next one
528 * to go OR put it in the correct place in the delivery queue. If we do
529 * append to the so_buf, keep doing so until we are out of order. One big
530 * question still remains, what to do when the socket buffer is FULL??
/*
 * sctp_queue_data_to_stream() — place an ordered readq entry either
 * directly on the socket buffer (when its SSN is the next to deliver,
 * then keep draining the stream while in order) or into its SSN-sorted
 * position on the stream's inqueue.  A duplicate or already-delivered
 * SSN is a protocol violation and aborts the association (setting
 * *abort_flag — presumably; the assignment line was dropped from this
 * extract, TODO confirm).
 *
 * NOTE(review): extract is damaged — braces, `return`s and several
 * statements were dropped; comments describe only the visible lines.
 */
533 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
534 struct sctp_queued_to_read *control, int *abort_flag)
537 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
538 * all the data in one stream this could happen quite rapidly. One
539 * could use the TSN to keep track of things, but this scheme breaks
540 * down in the other type of stream useage that could occur. Send a
541 * single msg to stream 0, send 4Billion messages to stream 1, now
542 * send a message to stream 0. You have a situation where the TSN
543 * has wrapped but not in the stream. Is this worth worrying about
544 * or should we just change our queue sort at the bottom to be by
547 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
548 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
549 * assignment this could happen... and I don't see how this would be
550 * a violation. So for now I am undecided an will leave the sort by
551 * SSN alone. Maybe a hybred approach is the answer
554 struct sctp_stream_in *strm;
555 struct sctp_queued_to_read *at;
/* Account the entry on the all-streams totals up front; removed again if delivered immediately. */
561 asoc->size_on_all_streams += control->length;
562 sctp_ucount_incr(asoc->cnt_on_all_streams);
563 strm = &asoc->strmin[control->sinfo_stream];
564 nxt_todel = strm->last_sequence_delivered + 1;
565 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
566 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
568 SCTPDBG(SCTP_DEBUG_INDATA1,
569 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
570 (uint32_t) control->sinfo_stream,
571 (uint32_t) strm->last_sequence_delivered,
572 (uint32_t) nxt_todel);
/* Incoming SSN at or behind what we already delivered (serial-number compare): duplicate — protocol violation. */
573 if (compare_with_wrap(strm->last_sequence_delivered,
574 control->sinfo_ssn, MAX_SEQ) ||
575 (strm->last_sequence_delivered == control->sinfo_ssn)) {
576 /* The incoming sseq is behind where we last delivered? */
577 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
578 control->sinfo_ssn, strm->last_sequence_delivered);
580 * throw it in the stream so it gets cleaned up in
581 * association destruction
583 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Build a PROTOCOL_VIOLATION error cause (paramhdr + 3 diagnostic words) for the ABORT. */
584 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
585 0, M_DONTWAIT, 1, MT_DATA);
587 struct sctp_paramhdr *ph;
590 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
591 (sizeof(uint32_t) * 3);
592 ph = mtod(oper, struct sctp_paramhdr *);
593 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
594 ph->param_length = htons(SCTP_BUF_LEN(oper));
595 ippp = (uint32_t *) (ph + 1);
596 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
598 *ippp = control->sinfo_tsn;
600 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
602 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
603 sctp_abort_an_association(stcb->sctp_ep, stcb,
604 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* Exactly the next SSN: deliver immediately, then drain any in-order successors. */
610 if (nxt_todel == control->sinfo_ssn) {
611 /* can be delivered right away? */
612 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
613 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
616 asoc->size_on_all_streams -= control->length;
617 sctp_ucount_decr(asoc->cnt_on_all_streams);
618 strm->last_sequence_delivered++;
619 sctp_add_to_readq(stcb->sctp_ep, stcb,
621 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
622 control = TAILQ_FIRST(&strm->inqueue);
623 while (control != NULL) {
625 nxt_todel = strm->last_sequence_delivered + 1;
626 if (nxt_todel == control->sinfo_ssn) {
627 at = TAILQ_NEXT(control, next);
628 TAILQ_REMOVE(&strm->inqueue, control, next);
629 asoc->size_on_all_streams -= control->length;
630 sctp_ucount_decr(asoc->cnt_on_all_streams);
631 strm->last_sequence_delivered++;
633 * We ignore the return of deliver_data here
634 * since we always can hold the chunk on the
635 * d-queue. And we have a finite number that
636 * can be delivered from the strq.
638 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
639 sctp_log_strm_del(control, NULL,
640 SCTP_STR_LOG_FROM_IMMED_DEL);
642 sctp_add_to_readq(stcb->sctp_ep, stcb,
644 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
653 * Ok, we did not deliver this guy, find the correct place
654 * to put it on the queue.
656 if (TAILQ_EMPTY(&strm->inqueue)) {
658 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
659 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
661 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Walk the inqueue to find the SSN-ordered insertion point. */
663 TAILQ_FOREACH(at, &strm->inqueue, next) {
664 if (compare_with_wrap(at->sinfo_ssn,
665 control->sinfo_ssn, MAX_SEQ)) {
667 * one in queue is bigger than the
668 * new one, insert before this one
670 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
671 sctp_log_strm_del(control, at,
672 SCTP_STR_LOG_FROM_INSERT_MD);
674 TAILQ_INSERT_BEFORE(at, control, next);
676 } else if (at->sinfo_ssn == control->sinfo_ssn) {
678 * Gak, He sent me a duplicate str
682 * foo bar, I guess I will just free
683 * this new guy, should we abort
684 * too? FIX ME MAYBE? Or it COULD be
685 * that the SSN's have wrapped.
686 * Maybe I should compare to TSN
687 * somehow... sigh for now just blow
/* Duplicate SSN already queued: drop the new entry, undo accounting, release references. */
692 sctp_m_freem(control->data);
693 control->data = NULL;
694 asoc->size_on_all_streams -= control->length;
695 sctp_ucount_decr(asoc->cnt_on_all_streams);
696 if (control->whoFrom)
697 sctp_free_remote_addr(control->whoFrom);
698 control->whoFrom = NULL;
699 sctp_free_a_readq(stcb, control);
702 if (TAILQ_NEXT(at, next) == NULL) {
704 * We are at the end, insert
707 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
708 sctp_log_strm_del(control, at,
709 SCTP_STR_LOG_FROM_INSERT_TL);
711 TAILQ_INSERT_AFTER(&strm->inqueue,
722 * Returns two things: You get the total size of the deliverable parts of the
723 * first fragmented message on the reassembly queue. And you get a 1 back if
724 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * sctp_is_all_msg_on_reasm() — per the file comment above: accumulates
 * into *t_size the deliverable size of the first fragmented message on
 * the reassembly queue, and reports (presumably returning 1/0; the
 * return lines were dropped from this extract) whether the whole
 * message — FIRST fragment through LAST fragment, with contiguous
 * TSNs — is present.
 */
727 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
729 struct sctp_tmit_chunk *chk;
733 chk = TAILQ_FIRST(&asoc->reasmqueue);
735 /* nothing on the queue */
738 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
739 /* Not a first on the queue */
742 tsn = chk->rec.data.TSN_seq;
/* TSN gap means the message is still incomplete. */
744 if (tsn != chk->rec.data.TSN_seq) {
747 *t_size += chk->send_size;
748 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
752 chk = TAILQ_NEXT(chk, sctp_next);
/*
 * sctp_deliver_reasm_check() — decide whether fragmented delivery can
 * start (or continue) from the reassembly queue.  If no delivery is in
 * progress and the head chunk is a deliverable FIRST fragment (next SSN
 * for ordered data, or unordered), and either the whole message is
 * present or enough has accumulated to cross the partial-delivery
 * point, it primes the PD-API state and calls
 * sctp_service_reassembly(); if delivery is already in progress it
 * simply services the queue again.
 *
 * NOTE(review): extract is damaged (dropped lines, stray leading line
 * numbers); comments describe only the visible statements.
 */
758 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
760 struct sctp_tmit_chunk *chk;
765 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the accounting to a known-zero state. */
768 asoc->size_on_reasm_queue = 0;
769 asoc->cnt_on_reasm_queue = 0;
772 if (asoc->fragmented_delivery_inprogress == 0) {
774 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
775 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
776 (nxt_todel == chk->rec.data.stream_seq ||
777 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
779 * Yep the first one is here and its ok to deliver
/* Start PD-API when the whole message is here, or enough is buffered to hit the partial-delivery point. */
782 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
783 (tsize >= stcb->sctp_ep->partial_delivery_point))) {
786 * Yes, we setup to start reception, by
787 * backing down the TSN just in case we
788 * can't deliver. If we
790 asoc->fragmented_delivery_inprogress = 1;
791 asoc->tsn_last_delivered =
792 chk->rec.data.TSN_seq - 1;
794 chk->rec.data.stream_number;
795 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
796 asoc->pdapi_ppid = chk->rec.data.payloadtype;
797 asoc->fragment_flags = chk->rec.data.rcv_flags;
798 sctp_service_reassembly(stcb, asoc);
803 * Service re-assembly will deliver stream data queued at
804 * the end of fragmented delivery.. but it wont know to go
805 * back and call itself again... we do that here with the
808 sctp_service_reassembly(stcb, asoc);
809 if (asoc->fragmented_delivery_inprogress == 0) {
811 * finished our Fragmented delivery, could be more
820 * Dump onto the re-assembly queue, in its proper place. After dumping on the
821 * queue, see if anthing can be delivered. If so pull it off (or as much as
822 * we can. If we run out of space then we must dump what we can and set the
823 * appropriate flag to say we queued what we could.
826 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
827 struct sctp_tmit_chunk *chk, int *abort_flag)
830 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
832 struct sctp_tmit_chunk *at, *prev, *next;
835 cum_ackp1 = asoc->tsn_last_delivered + 1;
836 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
837 /* This is the first one on the queue */
838 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
840 * we do not check for delivery of anything when only one
843 asoc->size_on_reasm_queue = chk->send_size;
844 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
845 if (chk->rec.data.TSN_seq == cum_ackp1) {
846 if (asoc->fragmented_delivery_inprogress == 0 &&
847 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
848 SCTP_DATA_FIRST_FRAG) {
850 * An empty queue, no delivery inprogress,
851 * we hit the next one and it does NOT have
852 * a FIRST fragment mark.
854 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
855 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
856 0, M_DONTWAIT, 1, MT_DATA);
859 struct sctp_paramhdr *ph;
863 sizeof(struct sctp_paramhdr) +
864 (sizeof(uint32_t) * 3);
865 ph = mtod(oper, struct sctp_paramhdr *);
867 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
868 ph->param_length = htons(SCTP_BUF_LEN(oper));
869 ippp = (uint32_t *) (ph + 1);
870 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
872 *ippp = chk->rec.data.TSN_seq;
874 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
877 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
878 sctp_abort_an_association(stcb->sctp_ep, stcb,
879 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
881 } else if (asoc->fragmented_delivery_inprogress &&
882 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
884 * We are doing a partial delivery and the
885 * NEXT chunk MUST be either the LAST or
886 * MIDDLE fragment NOT a FIRST
888 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
889 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
890 0, M_DONTWAIT, 1, MT_DATA);
892 struct sctp_paramhdr *ph;
896 sizeof(struct sctp_paramhdr) +
897 (3 * sizeof(uint32_t));
898 ph = mtod(oper, struct sctp_paramhdr *);
900 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
901 ph->param_length = htons(SCTP_BUF_LEN(oper));
902 ippp = (uint32_t *) (ph + 1);
903 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
905 *ippp = chk->rec.data.TSN_seq;
907 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
909 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
910 sctp_abort_an_association(stcb->sctp_ep, stcb,
911 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
913 } else if (asoc->fragmented_delivery_inprogress) {
915 * Here we are ok with a MIDDLE or LAST
918 if (chk->rec.data.stream_number !=
919 asoc->str_of_pdapi) {
920 /* Got to be the right STR No */
921 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
922 chk->rec.data.stream_number,
924 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
925 0, M_DONTWAIT, 1, MT_DATA);
927 struct sctp_paramhdr *ph;
931 sizeof(struct sctp_paramhdr) +
932 (sizeof(uint32_t) * 3);
934 struct sctp_paramhdr *);
936 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
938 htons(SCTP_BUF_LEN(oper));
939 ippp = (uint32_t *) (ph + 1);
940 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
942 *ippp = chk->rec.data.TSN_seq;
944 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
946 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
947 sctp_abort_an_association(stcb->sctp_ep,
948 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
950 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
951 SCTP_DATA_UNORDERED &&
952 chk->rec.data.stream_seq !=
953 asoc->ssn_of_pdapi) {
954 /* Got to be the right STR Seq */
955 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
956 chk->rec.data.stream_seq,
958 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
959 0, M_DONTWAIT, 1, MT_DATA);
961 struct sctp_paramhdr *ph;
965 sizeof(struct sctp_paramhdr) +
966 (3 * sizeof(uint32_t));
968 struct sctp_paramhdr *);
970 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
972 htons(SCTP_BUF_LEN(oper));
973 ippp = (uint32_t *) (ph + 1);
974 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
976 *ippp = chk->rec.data.TSN_seq;
978 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
981 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
982 sctp_abort_an_association(stcb->sctp_ep,
983 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
991 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
992 if (compare_with_wrap(at->rec.data.TSN_seq,
993 chk->rec.data.TSN_seq, MAX_TSN)) {
995 * one in queue is bigger than the new one, insert
999 asoc->size_on_reasm_queue += chk->send_size;
1000 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1002 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1004 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1005 /* Gak, He sent me a duplicate str seq number */
1007 * foo bar, I guess I will just free this new guy,
1008 * should we abort too? FIX ME MAYBE? Or it COULD be
1009 * that the SSN's have wrapped. Maybe I should
1010 * compare to TSN somehow... sigh for now just blow
1014 sctp_m_freem(chk->data);
1017 sctp_free_a_chunk(stcb, chk);
1020 last_flags = at->rec.data.rcv_flags;
1021 last_tsn = at->rec.data.TSN_seq;
1023 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1025 * We are at the end, insert it after this
1028 /* check it first */
1029 asoc->size_on_reasm_queue += chk->send_size;
1030 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1031 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1036 /* Now the audits */
1038 prev_tsn = chk->rec.data.TSN_seq - 1;
1039 if (prev_tsn == prev->rec.data.TSN_seq) {
1041 * Ok the one I am dropping onto the end is the
1042 * NEXT. A bit of valdiation here.
1044 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1045 SCTP_DATA_FIRST_FRAG ||
1046 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1047 SCTP_DATA_MIDDLE_FRAG) {
1049 * Insert chk MUST be a MIDDLE or LAST
1052 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1053 SCTP_DATA_FIRST_FRAG) {
1054 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1055 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1056 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1057 0, M_DONTWAIT, 1, MT_DATA);
1059 struct sctp_paramhdr *ph;
1062 SCTP_BUF_LEN(oper) =
1063 sizeof(struct sctp_paramhdr) +
1064 (3 * sizeof(uint32_t));
1066 struct sctp_paramhdr *);
1068 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1070 htons(SCTP_BUF_LEN(oper));
1071 ippp = (uint32_t *) (ph + 1);
1072 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1074 *ippp = chk->rec.data.TSN_seq;
1076 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1079 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1080 sctp_abort_an_association(stcb->sctp_ep,
1081 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1085 if (chk->rec.data.stream_number !=
1086 prev->rec.data.stream_number) {
1088 * Huh, need the correct STR here,
1089 * they must be the same.
1091 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1092 chk->rec.data.stream_number,
1093 prev->rec.data.stream_number);
1094 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1095 0, M_DONTWAIT, 1, MT_DATA);
1097 struct sctp_paramhdr *ph;
1100 SCTP_BUF_LEN(oper) =
1101 sizeof(struct sctp_paramhdr) +
1102 (3 * sizeof(uint32_t));
1104 struct sctp_paramhdr *);
1106 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1108 htons(SCTP_BUF_LEN(oper));
1109 ippp = (uint32_t *) (ph + 1);
1110 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1112 *ippp = chk->rec.data.TSN_seq;
1114 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1116 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1117 sctp_abort_an_association(stcb->sctp_ep,
1118 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1123 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1124 chk->rec.data.stream_seq !=
1125 prev->rec.data.stream_seq) {
1127 * Huh, need the correct STR here,
1128 * they must be the same.
1130 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1131 chk->rec.data.stream_seq,
1132 prev->rec.data.stream_seq);
1133 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1134 0, M_DONTWAIT, 1, MT_DATA);
1136 struct sctp_paramhdr *ph;
1139 SCTP_BUF_LEN(oper) =
1140 sizeof(struct sctp_paramhdr) +
1141 (3 * sizeof(uint32_t));
1143 struct sctp_paramhdr *);
1145 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1147 htons(SCTP_BUF_LEN(oper));
1148 ippp = (uint32_t *) (ph + 1);
1149 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1151 *ippp = chk->rec.data.TSN_seq;
1153 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1155 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1156 sctp_abort_an_association(stcb->sctp_ep,
1157 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1162 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1163 SCTP_DATA_LAST_FRAG) {
1164 /* Insert chk MUST be a FIRST */
1165 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1166 SCTP_DATA_FIRST_FRAG) {
1167 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1168 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1169 0, M_DONTWAIT, 1, MT_DATA);
1171 struct sctp_paramhdr *ph;
1174 SCTP_BUF_LEN(oper) =
1175 sizeof(struct sctp_paramhdr) +
1176 (3 * sizeof(uint32_t));
1178 struct sctp_paramhdr *);
1180 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1182 htons(SCTP_BUF_LEN(oper));
1183 ippp = (uint32_t *) (ph + 1);
1184 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1186 *ippp = chk->rec.data.TSN_seq;
1188 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1191 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1192 sctp_abort_an_association(stcb->sctp_ep,
1193 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1202 post_tsn = chk->rec.data.TSN_seq + 1;
1203 if (post_tsn == next->rec.data.TSN_seq) {
1205 * Ok the one I am inserting ahead of is my NEXT
1206 * one. A bit of validation here.
1208 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1209 /* Insert chk MUST be a last fragment */
1210 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1211 != SCTP_DATA_LAST_FRAG) {
1212 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1213 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1214 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1215 0, M_DONTWAIT, 1, MT_DATA);
1217 struct sctp_paramhdr *ph;
1220 SCTP_BUF_LEN(oper) =
1221 sizeof(struct sctp_paramhdr) +
1222 (3 * sizeof(uint32_t));
1224 struct sctp_paramhdr *);
1226 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1228 htons(SCTP_BUF_LEN(oper));
1229 ippp = (uint32_t *) (ph + 1);
1230 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1232 *ippp = chk->rec.data.TSN_seq;
1234 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1236 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1237 sctp_abort_an_association(stcb->sctp_ep,
1238 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1243 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1244 SCTP_DATA_MIDDLE_FRAG ||
1245 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1246 SCTP_DATA_LAST_FRAG) {
1248 * Insert chk CAN be MIDDLE or FIRST NOT
1251 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1252 SCTP_DATA_LAST_FRAG) {
1253 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1254 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1255 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1256 0, M_DONTWAIT, 1, MT_DATA);
1258 struct sctp_paramhdr *ph;
1261 SCTP_BUF_LEN(oper) =
1262 sizeof(struct sctp_paramhdr) +
1263 (3 * sizeof(uint32_t));
1265 struct sctp_paramhdr *);
1267 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1269 htons(SCTP_BUF_LEN(oper));
1270 ippp = (uint32_t *) (ph + 1);
1271 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1273 *ippp = chk->rec.data.TSN_seq;
1275 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1278 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1279 sctp_abort_an_association(stcb->sctp_ep,
1280 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1285 if (chk->rec.data.stream_number !=
1286 next->rec.data.stream_number) {
1288 * Huh, need the correct STR here,
1289 * they must be the same.
1291 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1292 chk->rec.data.stream_number,
1293 next->rec.data.stream_number);
1294 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1295 0, M_DONTWAIT, 1, MT_DATA);
1297 struct sctp_paramhdr *ph;
1300 SCTP_BUF_LEN(oper) =
1301 sizeof(struct sctp_paramhdr) +
1302 (3 * sizeof(uint32_t));
1304 struct sctp_paramhdr *);
1306 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1308 htons(SCTP_BUF_LEN(oper));
1309 ippp = (uint32_t *) (ph + 1);
1310 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1312 *ippp = chk->rec.data.TSN_seq;
1314 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1317 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1318 sctp_abort_an_association(stcb->sctp_ep,
1319 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1324 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1325 chk->rec.data.stream_seq !=
1326 next->rec.data.stream_seq) {
1328 * Huh, need the correct STR here,
1329 * they must be the same.
1331 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1332 chk->rec.data.stream_seq,
1333 next->rec.data.stream_seq);
1334 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1335 0, M_DONTWAIT, 1, MT_DATA);
1337 struct sctp_paramhdr *ph;
1340 SCTP_BUF_LEN(oper) =
1341 sizeof(struct sctp_paramhdr) +
1342 (3 * sizeof(uint32_t));
1344 struct sctp_paramhdr *);
1346 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1348 htons(SCTP_BUF_LEN(oper));
1349 ippp = (uint32_t *) (ph + 1);
1350 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1352 *ippp = chk->rec.data.TSN_seq;
1354 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1356 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1357 sctp_abort_an_association(stcb->sctp_ep,
1358 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1366 /* Do we need to do some delivery? check */
1367 sctp_deliver_reasm_check(stcb, asoc);
1371 * This is an unfortunate routine. It checks to make sure an evil guy is not
1372 * stuffing us full of bad packet fragments. A broken peer could also do this
1373 * but this is doubtful. It is too bad I must worry about evil crackers sigh
1377 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1380 struct sctp_tmit_chunk *at;
1383 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1384 if (compare_with_wrap(TSN_seq,
1385 at->rec.data.TSN_seq, MAX_TSN)) {
1386 /* is it one bigger? */
1387 tsn_est = at->rec.data.TSN_seq + 1;
1388 if (tsn_est == TSN_seq) {
1389 /* yep. It better be a last then */
1390 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1391 SCTP_DATA_LAST_FRAG) {
1393 * Ok this guy belongs next to a guy
1394 * that is NOT last, it should be a
1395 * middle/last, not a complete
1401 * This guy is ok since it is a LAST
1402 * and the new chunk is a fully
1403 * self-contained one.
1408 } else if (TSN_seq == at->rec.data.TSN_seq) {
1409 /* Software error since I have a dup? */
1413 * Ok, 'at' is larger than new chunk but does it
1414 * need to be right before it.
1416 tsn_est = TSN_seq + 1;
1417 if (tsn_est == at->rec.data.TSN_seq) {
1418 /* Yep, It better be a first */
1419 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1420 SCTP_DATA_FIRST_FRAG) {
1433 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1434 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1435 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1436 int *break_flag, int last_chunk)
1438 /* Process a data chunk */
1439 /* struct sctp_tmit_chunk *chk; */
1440 struct sctp_tmit_chunk *chk;
1444 int need_reasm_check = 0;
1445 uint16_t strmno, strmseq;
1447 struct sctp_queued_to_read *control;
1449 uint32_t protocol_id;
1450 uint8_t chunk_flags;
1451 struct sctp_stream_reset_list *liste;
1454 tsn = ntohl(ch->dp.tsn);
1455 chunk_flags = ch->ch.chunk_flags;
1456 protocol_id = ch->dp.protocol_id;
1457 ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1458 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
1459 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1464 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1465 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1466 asoc->cumulative_tsn == tsn) {
1467 /* It is a duplicate */
1468 SCTP_STAT_INCR(sctps_recvdupdata);
1469 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1470 /* Record a dup for the next outbound sack */
1471 asoc->dup_tsns[asoc->numduptsns] = tsn;
1476 /* Calculate the number of TSN's between the base and this TSN */
1477 if (tsn >= asoc->mapping_array_base_tsn) {
1478 gap = tsn - asoc->mapping_array_base_tsn;
1480 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1482 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1483 /* Can't hold the bit in the mapping at max array, toss it */
1486 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1487 SCTP_TCB_LOCK_ASSERT(stcb);
1488 if (sctp_expand_mapping_array(asoc, gap)) {
1489 /* Can't expand, drop it */
1493 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1496 /* See if we have received this one already */
1497 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1498 SCTP_STAT_INCR(sctps_recvdupdata);
1499 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1500 /* Record a dup for the next outbound sack */
1501 asoc->dup_tsns[asoc->numduptsns] = tsn;
1504 asoc->send_sack = 1;
1508 * Check to see about the GONE flag, duplicates would cause a sack
1509 * to be sent up above
1511 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1512 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1513 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1516 * wait a minute, this guy is gone, there is no longer a
1517 * receiver. Send peer an ABORT!
1519 struct mbuf *op_err;
1521 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1522 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1527 * Now before going further we see if there is room. If NOT then we
1528 * MAY let one through only IF this TSN is the one we are waiting
1529 * for on a partial delivery API.
1532 /* now do the tests */
1533 if (((asoc->cnt_on_all_streams +
1534 asoc->cnt_on_reasm_queue +
1535 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
1536 (((int)asoc->my_rwnd) <= 0)) {
1538 * When we have NO room in the rwnd we check to make sure
1539 * the reader is doing its job...
1541 if (stcb->sctp_socket->so_rcv.sb_cc) {
1542 /* some to read, wake-up */
1543 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1546 so = SCTP_INP_SO(stcb->sctp_ep);
1547 atomic_add_int(&stcb->asoc.refcnt, 1);
1548 SCTP_TCB_UNLOCK(stcb);
1549 SCTP_SOCKET_LOCK(so, 1);
1550 SCTP_TCB_LOCK(stcb);
1551 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1552 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1553 /* assoc was freed while we were unlocked */
1554 SCTP_SOCKET_UNLOCK(so, 1);
1558 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1559 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1560 SCTP_SOCKET_UNLOCK(so, 1);
1563 /* now is it in the mapping array of what we have accepted? */
1564 if (compare_with_wrap(tsn,
1565 asoc->highest_tsn_inside_map, MAX_TSN)) {
1567 /* Nope not in the valid range dump it */
1568 SCTPDBG(SCTP_DEBUG_INDATA1, "My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
1569 (u_long)tsn, (u_long)asoc->my_rwnd,
1570 sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
1571 sctp_set_rwnd(stcb, asoc);
1572 if ((asoc->cnt_on_all_streams +
1573 asoc->cnt_on_reasm_queue +
1574 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
1575 SCTP_STAT_INCR(sctps_datadropchklmt);
1577 SCTP_STAT_INCR(sctps_datadroprwnd);
1584 strmno = ntohs(ch->dp.stream_id);
1585 if (strmno >= asoc->streamincnt) {
1586 struct sctp_paramhdr *phdr;
1589 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1590 0, M_DONTWAIT, 1, MT_DATA);
1592 /* add some space up front so prepend will work well */
1593 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1594 phdr = mtod(mb, struct sctp_paramhdr *);
1596 * Error causes are just param's and this one has
1597 * two back to back phdr, one with the error type
1598 * and size, the other with the streamid and a rsvd
1600 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1601 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1602 phdr->param_length =
1603 htons(sizeof(struct sctp_paramhdr) * 2);
1605 /* We insert the stream in the type field */
1606 phdr->param_type = ch->dp.stream_id;
1607 /* And set the length to 0 for the rsvd field */
1608 phdr->param_length = 0;
1609 sctp_queue_op_err(stcb, mb);
1611 SCTP_STAT_INCR(sctps_badsid);
1612 SCTP_TCB_LOCK_ASSERT(stcb);
1613 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1614 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1615 /* we have a new high score */
1616 asoc->highest_tsn_inside_map = tsn;
1617 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
1618 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1621 if (tsn == (asoc->cumulative_tsn + 1)) {
1622 /* Update cum-ack */
1623 asoc->cumulative_tsn = tsn;
1628 * Before we continue lets validate that we are not being fooled by
1629 * an evil attacker. We can only have 4k chunks based on our TSN
1630 * spread allowed by the mapping array 512 * 8 bits, so there is no
1631 * way our stream sequence numbers could have wrapped. We of course
1632 * only validate the FIRST fragment so the bit must be set.
1634 strmseq = ntohs(ch->dp.stream_sequence);
1635 #ifdef SCTP_ASOCLOG_OF_TSNS
1636 SCTP_TCB_LOCK_ASSERT(stcb);
1637 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1638 asoc->tsn_in_at = 0;
1639 asoc->tsn_in_wrapped = 1;
1641 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1642 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1643 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1644 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1645 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1646 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1647 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1648 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1651 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1652 (TAILQ_EMPTY(&asoc->resetHead)) &&
1653 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1654 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1655 strmseq, MAX_SEQ) ||
1656 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1657 /* The incoming sseq is behind where we last delivered? */
1658 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1659 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1660 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1661 0, M_DONTWAIT, 1, MT_DATA);
1663 struct sctp_paramhdr *ph;
1666 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1667 (3 * sizeof(uint32_t));
1668 ph = mtod(oper, struct sctp_paramhdr *);
1669 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1670 ph->param_length = htons(SCTP_BUF_LEN(oper));
1671 ippp = (uint32_t *) (ph + 1);
1672 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1676 *ippp = ((strmno << 16) | strmseq);
1679 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1680 sctp_abort_an_association(stcb->sctp_ep, stcb,
1681 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1685 /************************************
1686 * From here down we may find ch-> invalid
1687 * so its a good idea NOT to use it.
1688 *************************************/
1690 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1691 if (last_chunk == 0) {
1692 dmbuf = SCTP_M_COPYM(*m,
1693 (offset + sizeof(struct sctp_data_chunk)),
1694 the_len, M_DONTWAIT);
1695 #ifdef SCTP_MBUF_LOGGING
1696 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
1701 if (SCTP_BUF_IS_EXTENDED(mat)) {
1702 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1704 mat = SCTP_BUF_NEXT(mat);
1709 /* We can steal the last chunk */
1713 /* lop off the top part */
1714 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1715 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1716 l_len = SCTP_BUF_LEN(dmbuf);
1719 * need to count up the size hopefully does not hit
1727 l_len += SCTP_BUF_LEN(lat);
1728 lat = SCTP_BUF_NEXT(lat);
1731 if (l_len > the_len) {
1732 /* Trim the end round bytes off too */
1733 m_adj(dmbuf, -(l_len - the_len));
1736 if (dmbuf == NULL) {
1737 SCTP_STAT_INCR(sctps_nomem);
1740 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1741 asoc->fragmented_delivery_inprogress == 0 &&
1742 TAILQ_EMPTY(&asoc->resetHead) &&
1744 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1745 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1746 /* Candidate for express delivery */
1748 * Its not fragmented, No PD-API is up, Nothing in the
1749 * delivery queue, Its un-ordered OR ordered and the next to
1750 * deliver AND nothing else is stuck on the stream queue,
1751 * And there is room for it in the socket buffer. Lets just
1752 * stuff it up the buffer....
1755 /* It would be nice to avoid this copy if we could :< */
1756 sctp_alloc_a_readq(stcb, control);
1757 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1763 if (control == NULL) {
1764 goto failed_express_del;
1766 sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1767 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1768 /* for ordered, bump what we delivered */
1769 asoc->strmin[strmno].last_sequence_delivered++;
1771 SCTP_STAT_INCR(sctps_recvexpress);
1772 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
1773 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1774 SCTP_STR_LOG_FROM_EXPRS_DEL);
1777 goto finish_express_del;
1780 /* If we reach here this is a new chunk */
1783 /* Express for fragmented delivery? */
1784 if ((asoc->fragmented_delivery_inprogress) &&
1785 (stcb->asoc.control_pdapi) &&
1786 (asoc->str_of_pdapi == strmno) &&
1787 (asoc->ssn_of_pdapi == strmseq)
1789 control = stcb->asoc.control_pdapi;
1790 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1791 /* Can't be another first? */
1792 goto failed_pdapi_express_del;
1794 if (tsn == (control->sinfo_tsn + 1)) {
1795 /* Yep, we can add it on */
1799 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1802 cumack = asoc->cumulative_tsn;
1803 if ((cumack + 1) == tsn)
1806 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1808 &stcb->sctp_socket->so_rcv)) {
1809 SCTP_PRINTF("Append fails end:%d\n", end);
1810 goto failed_pdapi_express_del;
1812 SCTP_STAT_INCR(sctps_recvexpressm);
1813 control->sinfo_tsn = tsn;
1814 asoc->tsn_last_delivered = tsn;
1815 asoc->fragment_flags = chunk_flags;
1816 asoc->tsn_of_pdapi_last_delivered = tsn;
1817 asoc->last_flags_delivered = chunk_flags;
1818 asoc->last_strm_seq_delivered = strmseq;
1819 asoc->last_strm_no_delivered = strmno;
1821 /* clean up the flags and such */
1822 asoc->fragmented_delivery_inprogress = 0;
1823 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1824 asoc->strmin[strmno].last_sequence_delivered++;
1826 stcb->asoc.control_pdapi = NULL;
1827 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1829 * There could be another message
1832 need_reasm_check = 1;
1836 goto finish_express_del;
1839 failed_pdapi_express_del:
1841 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1842 sctp_alloc_a_chunk(stcb, chk);
1844 /* No memory so we drop the chunk */
1845 SCTP_STAT_INCR(sctps_nomem);
1846 if (last_chunk == 0) {
1847 /* we copied it, free the copy */
1848 sctp_m_freem(dmbuf);
1852 chk->rec.data.TSN_seq = tsn;
1853 chk->no_fr_allowed = 0;
1854 chk->rec.data.stream_seq = strmseq;
1855 chk->rec.data.stream_number = strmno;
1856 chk->rec.data.payloadtype = protocol_id;
1857 chk->rec.data.context = stcb->asoc.context;
1858 chk->rec.data.doing_fast_retransmit = 0;
1859 chk->rec.data.rcv_flags = chunk_flags;
1861 chk->send_size = the_len;
1863 atomic_add_int(&net->ref_count, 1);
1866 sctp_alloc_a_readq(stcb, control);
1867 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1873 if (control == NULL) {
1874 /* No memory so we drop the chunk */
1875 SCTP_STAT_INCR(sctps_nomem);
1876 if (last_chunk == 0) {
1877 /* we copied it, free the copy */
1878 sctp_m_freem(dmbuf);
1882 control->length = the_len;
1885 /* Mark it as received */
1886 /* Now queue it where it belongs */
1887 if (control != NULL) {
1888 /* First a sanity check */
1889 if (asoc->fragmented_delivery_inprogress) {
1891 * Ok, we have a fragmented delivery in progress if
1892 * this chunk is next to deliver OR belongs in our
1893 * view to the reassembly, the peer is evil or
1896 uint32_t estimate_tsn;
1898 estimate_tsn = asoc->tsn_last_delivered + 1;
1899 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1900 (estimate_tsn == control->sinfo_tsn)) {
1901 /* Evil/Broke peer */
1902 sctp_m_freem(control->data);
1903 control->data = NULL;
1904 if (control->whoFrom) {
1905 sctp_free_remote_addr(control->whoFrom);
1906 control->whoFrom = NULL;
1908 sctp_free_a_readq(stcb, control);
1909 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1910 0, M_DONTWAIT, 1, MT_DATA);
1912 struct sctp_paramhdr *ph;
1915 SCTP_BUF_LEN(oper) =
1916 sizeof(struct sctp_paramhdr) +
1917 (3 * sizeof(uint32_t));
1918 ph = mtod(oper, struct sctp_paramhdr *);
1920 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1921 ph->param_length = htons(SCTP_BUF_LEN(oper));
1922 ippp = (uint32_t *) (ph + 1);
1923 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1927 *ippp = ((strmno << 16) | strmseq);
1929 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1930 sctp_abort_an_association(stcb->sctp_ep, stcb,
1931 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1936 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1937 sctp_m_freem(control->data);
1938 control->data = NULL;
1939 if (control->whoFrom) {
1940 sctp_free_remote_addr(control->whoFrom);
1941 control->whoFrom = NULL;
1943 sctp_free_a_readq(stcb, control);
1945 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1946 0, M_DONTWAIT, 1, MT_DATA);
1948 struct sctp_paramhdr *ph;
1951 SCTP_BUF_LEN(oper) =
1952 sizeof(struct sctp_paramhdr) +
1953 (3 * sizeof(uint32_t));
1955 struct sctp_paramhdr *);
1957 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1959 htons(SCTP_BUF_LEN(oper));
1960 ippp = (uint32_t *) (ph + 1);
1961 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1965 *ippp = ((strmno << 16) | strmseq);
1967 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1968 sctp_abort_an_association(stcb->sctp_ep,
1969 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1976 /* No PDAPI running */
1977 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1979 * Reassembly queue is NOT empty validate
1980 * that this tsn does not need to be in
1981 * reassembly queue. If it does then our peer
1982 * is broken or evil.
1984 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1985 sctp_m_freem(control->data);
1986 control->data = NULL;
1987 if (control->whoFrom) {
1988 sctp_free_remote_addr(control->whoFrom);
1989 control->whoFrom = NULL;
1991 sctp_free_a_readq(stcb, control);
1992 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1993 0, M_DONTWAIT, 1, MT_DATA);
1995 struct sctp_paramhdr *ph;
1998 SCTP_BUF_LEN(oper) =
1999 sizeof(struct sctp_paramhdr) +
2000 (3 * sizeof(uint32_t));
2002 struct sctp_paramhdr *);
2004 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2006 htons(SCTP_BUF_LEN(oper));
2007 ippp = (uint32_t *) (ph + 1);
2008 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2012 *ippp = ((strmno << 16) | strmseq);
2014 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2015 sctp_abort_an_association(stcb->sctp_ep,
2016 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2023 /* ok, if we reach here we have passed the sanity checks */
2024 if (chunk_flags & SCTP_DATA_UNORDERED) {
2025 /* queue directly into socket buffer */
2026 sctp_add_to_readq(stcb->sctp_ep, stcb,
2028 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2031 * Special check for when streams are resetting. We
2032 * could be more smart about this and check the
2033 * actual stream to see if it is not being reset..
2034 * that way we would not create a HOLB when amongst
2035 * streams being reset and those not being reset.
2037 * We take complete messages that have a stream reset
2038 * intervening (aka the TSN is after where our
2039 * cum-ack needs to be) off and put them on a
2040 * pending_reply_queue. The reassembly ones we do
2041 * not have to worry about since they are all sorted
2042 * and processed by TSN order. It is only the
2043 * singletons I must worry about.
2045 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2046 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2049 * yep its past where we need to reset... go
2050 * ahead and queue it.
2052 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2054 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2056 struct sctp_queued_to_read *ctlOn;
2057 unsigned char inserted = 0;
2059 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2061 if (compare_with_wrap(control->sinfo_tsn,
2062 ctlOn->sinfo_tsn, MAX_TSN)) {
2063 ctlOn = TAILQ_NEXT(ctlOn, next);
2066 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2071 if (inserted == 0) {
2073 * must be put at end, use
2074 * prevP (all setup from
2075 * loop) to setup nextP.
2077 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2081 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2088 /* Into the re-assembly queue */
2089 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2092 * the assoc is now gone and chk was put onto the
2093 * reasm queue, which has all been freed.
2100 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2101 /* we have a new high score */
2102 asoc->highest_tsn_inside_map = tsn;
2103 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
2104 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2107 if (tsn == (asoc->cumulative_tsn + 1)) {
2108 /* Update cum-ack */
2109 asoc->cumulative_tsn = tsn;
2115 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2117 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2119 SCTP_STAT_INCR(sctps_recvdata);
2120 /* Set it present please */
2121 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
2122 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2124 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
2125 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2126 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2128 SCTP_TCB_LOCK_ASSERT(stcb);
2129 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2130 /* check the special flag for stream resets */
2131 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2132 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2133 (asoc->cumulative_tsn == liste->tsn))
2136 * we have finished working through the backlogged TSN's now
2137 * time to reset streams. 1: call reset function. 2: free
2138 * pending_reply space 3: distribute any chunks in
2139 * pending_reply_queue.
2141 struct sctp_queued_to_read *ctl;
2143 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2144 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2145 SCTP_FREE(liste, SCTP_M_STRESET);
2146 /* sa_ignore FREED_MEMORY */
2147 liste = TAILQ_FIRST(&asoc->resetHead);
2148 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2149 if (ctl && (liste == NULL)) {
2150 /* All can be removed */
2152 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2153 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2157 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2160 /* more than one in queue */
2161 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2163 * if ctl->sinfo_tsn is <= liste->tsn we can
2164 * process it which is the NOT of
2165 * ctl->sinfo_tsn > liste->tsn
2167 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2168 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2172 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2176 * Now service re-assembly to pick up anything that has been
2177 * held on reassembly queue?
2179 sctp_deliver_reasm_check(stcb, asoc);
2180 need_reasm_check = 0;
2182 if (need_reasm_check) {
2183 /* Another one waits ? */
2184 sctp_deliver_reasm_check(stcb, asoc);
2189 int8_t sctp_map_lookup_tab[256] = {
2190 -1, 0, -1, 1, -1, 0, -1, 2,
2191 -1, 0, -1, 1, -1, 0, -1, 3,
2192 -1, 0, -1, 1, -1, 0, -1, 2,
2193 -1, 0, -1, 1, -1, 0, -1, 4,
2194 -1, 0, -1, 1, -1, 0, -1, 2,
2195 -1, 0, -1, 1, -1, 0, -1, 3,
2196 -1, 0, -1, 1, -1, 0, -1, 2,
2197 -1, 0, -1, 1, -1, 0, -1, 5,
2198 -1, 0, -1, 1, -1, 0, -1, 2,
2199 -1, 0, -1, 1, -1, 0, -1, 3,
2200 -1, 0, -1, 1, -1, 0, -1, 2,
2201 -1, 0, -1, 1, -1, 0, -1, 4,
2202 -1, 0, -1, 1, -1, 0, -1, 2,
2203 -1, 0, -1, 1, -1, 0, -1, 3,
2204 -1, 0, -1, 1, -1, 0, -1, 2,
2205 -1, 0, -1, 1, -1, 0, -1, 6,
2206 -1, 0, -1, 1, -1, 0, -1, 2,
2207 -1, 0, -1, 1, -1, 0, -1, 3,
2208 -1, 0, -1, 1, -1, 0, -1, 2,
2209 -1, 0, -1, 1, -1, 0, -1, 4,
2210 -1, 0, -1, 1, -1, 0, -1, 2,
2211 -1, 0, -1, 1, -1, 0, -1, 3,
2212 -1, 0, -1, 1, -1, 0, -1, 2,
2213 -1, 0, -1, 1, -1, 0, -1, 5,
2214 -1, 0, -1, 1, -1, 0, -1, 2,
2215 -1, 0, -1, 1, -1, 0, -1, 3,
2216 -1, 0, -1, 1, -1, 0, -1, 2,
2217 -1, 0, -1, 1, -1, 0, -1, 4,
2218 -1, 0, -1, 1, -1, 0, -1, 2,
2219 -1, 0, -1, 1, -1, 0, -1, 3,
2220 -1, 0, -1, 1, -1, 0, -1, 2,
2221 -1, 0, -1, 1, -1, 0, -1, 7,
2226 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2229 * Now we also need to check the mapping array in a couple of ways.
2230 * 1) Did we move the cum-ack point?
2232 struct sctp_association *asoc;
2234 int last_all_ones = 0;
2235 int slide_from, slide_end, lgap, distance;
2236 uint32_t old_cumack, old_base, old_highest;
2237 unsigned char aux_array[64];
2243 old_cumack = asoc->cumulative_tsn;
2244 old_base = asoc->mapping_array_base_tsn;
2245 old_highest = asoc->highest_tsn_inside_map;
2246 if (asoc->mapping_array_size < 64)
2247 memcpy(aux_array, asoc->mapping_array,
2248 asoc->mapping_array_size);
2250 memcpy(aux_array, asoc->mapping_array, 64);
2253 * We could probably improve this a small bit by calculating the
2254 * offset of the current cum-ack as the starting point.
2257 for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
2259 if (asoc->mapping_array[i] == 0xff) {
2263 /* there is a 0 bit */
2264 at += sctp_map_lookup_tab[asoc->mapping_array[i]];
2269 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2270 /* at is one off, since in the table an embedded -1 is present */
2273 if (compare_with_wrap(asoc->cumulative_tsn,
2274 asoc->highest_tsn_inside_map,
2277 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2278 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2280 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2281 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2282 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2285 if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2286 /* The complete array was completed by a single FR */
2287 /* highest becomes the cum-ack */
2290 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2291 /* clear the array */
2292 clr = (at >> 3) + 1;
2293 if (clr > asoc->mapping_array_size) {
2294 clr = asoc->mapping_array_size;
2296 memset(asoc->mapping_array, 0, clr);
2297 /* base becomes one ahead of the cum-ack */
2298 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2299 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
2300 sctp_log_map(old_base, old_cumack, old_highest,
2301 SCTP_MAP_PREPARE_SLIDE);
2302 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2303 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2305 } else if (at >= 8) {
2306 /* we can slide the mapping array down */
2307 /* Calculate the new byte position we can move down */
2308 slide_from = at >> 3;
2310 * now calculate the ceiling of the move using our highest
2313 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2314 lgap = asoc->highest_tsn_inside_map -
2315 asoc->mapping_array_base_tsn;
2317 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2318 asoc->highest_tsn_inside_map + 1;
2320 slide_end = lgap >> 3;
2321 if (slide_end < slide_from) {
2322 panic("impossible slide");
2324 distance = (slide_end - slide_from) + 1;
2325 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
2326 sctp_log_map(old_base, old_cumack, old_highest,
2327 SCTP_MAP_PREPARE_SLIDE);
2328 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2329 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2331 if (distance + slide_from > asoc->mapping_array_size ||
2334 * Here we do NOT slide forward the array so that
2335 * hopefully when more data comes in to fill it up
2336 * we will be able to slide it forward. Really I
2337 * don't think this should happen :-0
2340 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
2341 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2342 (uint32_t) asoc->mapping_array_size,
2343 SCTP_MAP_SLIDE_NONE);
2348 for (ii = 0; ii < distance; ii++) {
2349 asoc->mapping_array[ii] =
2350 asoc->mapping_array[slide_from + ii];
2352 for (ii = distance; ii <= slide_end; ii++) {
2353 asoc->mapping_array[ii] = 0;
2355 asoc->mapping_array_base_tsn += (slide_from << 3);
2356 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
2357 sctp_log_map(asoc->mapping_array_base_tsn,
2358 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2359 SCTP_MAP_SLIDE_RESULT);
2364 * Now we need to see if we need to queue a sack or just start the
2365 * timer (if allowed).
2368 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2370 * Ok special case, in SHUTDOWN-SENT case. here we
2371 * maker sure SACK timer is off and instead send a
2372 * SHUTDOWN and a SACK
2374 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2375 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2376 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2378 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2379 sctp_send_sack(stcb);
2383 /* is there a gap now ? */
2384 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2385 stcb->asoc.cumulative_tsn, MAX_TSN);
2388 * CMT DAC algorithm: increase number of packets
2389 * received since last ack
2391 stcb->asoc.cmt_dac_pkts_rcvd++;
2393 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2395 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2397 (stcb->asoc.numduptsns) || /* we have dup's */
2398 (is_a_gap) || /* is still a gap */
2399 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2400 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2403 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) &&
2404 (stcb->asoc.send_sack == 0) &&
2405 (stcb->asoc.numduptsns == 0) &&
2406 (stcb->asoc.delayed_ack) &&
2407 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2410 * CMT DAC algorithm: With CMT,
2411 * delay acks even in the face of
2413 * reordering. Therefore, if acks that
2414 * do not have to be sent because of
2415 * the above reasons, will be
2416 * delayed. That is, acks that would
2417 * have been sent due to gap reports
2418 * will be delayed with DAC. Start
2419 * the delayed ack timer.
2421 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2422 stcb->sctp_ep, stcb, NULL);
2425 * Ok we must build a SACK since the
2426 * timer is pending, we got our
2427 * first packet OR there are gaps or
2430 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2431 sctp_send_sack(stcb);
2434 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2435 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2436 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() -- drive delivery from the association's
 * reassembly queue, continuing or starting a partial-delivery (PD-API)
 * session when a first-fragment is deliverable.
 *
 * NOTE(review): this copy of the file is damaged -- the embedded original
 * line numbers (2444, 2446, 2450, ...) are non-contiguous, so statements,
 * returns and closing braces are missing.  The comments below describe
 * only what the surviving lines show; restore this function from the
 * upstream sctp_indata.c before building or modifying it.
 */
2444 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2446 struct sctp_tmit_chunk *chk;
/* If a fragmented (partial-delivery) message is already being handed to
 * the user, keep servicing it first. */
2450 if (asoc->fragmented_delivery_inprogress) {
2451 sctp_service_reassembly(stcb, asoc);
2453 /* Can we proceed further, i.e. the PD-API is complete */
2454 if (asoc->fragmented_delivery_inprogress) {
2459 * Now is there some other chunk I can deliver from the reassembly
/* Peek at the head of the reassembly queue; if it is empty, the
 * size/count accounting is zeroed below. */
2463 chk = TAILQ_FIRST(&asoc->reasmqueue);
2465 asoc->size_on_reasm_queue = 0;
2466 asoc->cnt_on_reasm_queue = 0;
/* Next in-order stream sequence number expected on this chunk's stream. */
2469 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
/* Start a new PD-API session only for a FIRST_FRAG that is either the
 * next ordered SSN or unordered. */
2470 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2471 ((nxt_todel == chk->rec.data.stream_seq) ||
2472 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2474 * Yep the first one is here. We setup to start reception,
2475 * by backing down the TSN just in case we can't deliver.
2479 * Before we start though either all of the message should
2480 * be here or 1/4 the socket buffer max or nothing on the
2481 * delivery queue and something can be delivered.
2483 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2484 (tsize >= stcb->sctp_ep->partial_delivery_point))) {
/* Record the PD-API state (stream, SSN, PPID, flags) so later
 * fragments are matched to this in-progress message. */
2485 asoc->fragmented_delivery_inprogress = 1;
2486 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2487 asoc->str_of_pdapi = chk->rec.data.stream_number;
2488 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2489 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2490 asoc->fragment_flags = chk->rec.data.rcv_flags;
2491 sctp_service_reassembly(stcb, asoc);
2492 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() -- walk the DATA region of a received SCTP packet.
 * For each chunk: validate its length, hand SCTP_DATA chunks to
 * sctp_process_a_data_chunk(), silently skip KNOWN control chunk types
 * that appear out of place (or abort if sctp_strict_data_order), and
 * apply the RFC "upper two bits" rules to unknown chunk types (queue an
 * UNRECOG_CHUNK error report, optionally stop processing the packet).
 * Afterwards it services the reassembly queue and either sends a SACK
 * immediately or starts/keeps the delayed-ack (RECV) timer.
 *
 * NOTE(review): the embedded original line numbers (2500, 2501, 2504, ...)
 * are non-contiguous -- declarations (e.g. m, stop_proc, ippp, to/from),
 * braces and return statements are missing from this copy.  Restore the
 * function from upstream sctp_indata.c; do not attempt local repair.
 */
2500 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2501 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2502 struct sctp_nets *net, uint32_t * high_tsn)
2504 struct sctp_data_chunk *ch, chunk_buf;
2505 struct sctp_association *asoc;
2506 int num_chunks = 0; /* number of control chunks processed */
2508 int chk_length, break_flag, last_chunk;
2509 int abort_flag = 0, was_a_gap = 0;
/* Refresh our advertised receive window before absorbing this packet. */
2513 sctp_set_rwnd(stcb, &stcb->asoc);
2516 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap existed before processing: cum-ack below the
 * highest TSN in the mapping array means gap-ack blocks were pending. */
2518 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2519 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2520 /* there was a gap before this data was processed */
2524 * setup where we got the last DATA packet from for any SACK that
2525 * may need to go out. Don't bump the net. This is done ONLY when a
2526 * chunk is assigned.
2528 asoc->last_data_chunk_from = net;
2531 * Now before we proceed we must figure out if this is a wasted
2532 * cluster... i.e. it is a small packet sent in and yet the driver
2533 * underneath allocated a full cluster for it. If so we must copy it
2534 * to a smaller mbuf and free up the cluster mbuf. This will help
2535 * with cluster starvation. Note for __Panda__ we don't do this
2536 * since it has clusters all the way down to 64 bytes.
2538 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2539 /* we only handle mbufs that are singletons.. not chains */
2540 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2542 /* ok lets see if we can copy the data up */
2545 /* get the pointers and copy */
2546 to = mtod(m, caddr_t *);
2547 from = mtod((*mm), caddr_t *);
2548 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2549 /* copy the length and free up the old */
2550 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2552 /* sucess, back copy */
2555 /* We are in trouble in the mbuf world .. yikes */
2559 /* get pointer to the first chunk header */
2560 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2561 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2566 * process all DATA chunks...
2568 *high_tsn = asoc->cumulative_tsn;
2570 asoc->data_pkts_seen++;
/* Main loop: one iteration per chunk remaining in the packet. */
2571 while (stop_proc == 0) {
2572 /* validate chunk length */
2573 chk_length = ntohs(ch->ch.chunk_length);
2574 if (length - *offset < chk_length) {
2575 /* all done, mutulated chunk */
2579 if (ch->ch.chunk_type == SCTP_DATA) {
/* A DATA chunk must carry at least one byte of payload; an
 * empty one is a protocol violation and aborts the assoc. */
2580 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2582 * Need to send an abort since we had a
2583 * invalid data chunk.
2585 struct mbuf *op_err;
2587 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2588 0, M_DONTWAIT, 1, MT_DATA);
2591 struct sctp_paramhdr *ph;
2594 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2595 (2 * sizeof(uint32_t));
2596 ph = mtod(op_err, struct sctp_paramhdr *);
2598 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2599 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2600 ippp = (uint32_t *) (ph + 1);
2601 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2603 *ippp = asoc->cumulative_tsn;
2606 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2607 sctp_abort_association(inp, stcb, m, iphlen, sh,
2611 #ifdef SCTP_AUDITING_ENABLED
2612 sctp_audit_log(0xB1, 0);
2614 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2619 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2620 chk_length, net, high_tsn, &abort_flag, &break_flag,
2629 * Set because of out of rwnd space and no
2630 * drop rep space left.
2636 /* not a data chunk in the data region */
2637 switch (ch->ch.chunk_type) {
2638 case SCTP_INITIATION:
2639 case SCTP_INITIATION_ACK:
2640 case SCTP_SELECTIVE_ACK:
2641 case SCTP_HEARTBEAT_REQUEST:
2642 case SCTP_HEARTBEAT_ACK:
2643 case SCTP_ABORT_ASSOCIATION:
2645 case SCTP_SHUTDOWN_ACK:
2646 case SCTP_OPERATION_ERROR:
2647 case SCTP_COOKIE_ECHO:
2648 case SCTP_COOKIE_ACK:
2651 case SCTP_SHUTDOWN_COMPLETE:
2652 case SCTP_AUTHENTICATION:
2653 case SCTP_ASCONF_ACK:
2654 case SCTP_PACKET_DROPPED:
2655 case SCTP_STREAM_RESET:
2656 case SCTP_FORWARD_CUM_TSN:
2659 * Now, what do we do with KNOWN chunks that
2660 * are NOT in the right place?
2662 * For now, I do nothing but ignore them. We
2663 * may later want to add sysctl stuff to
2664 * switch out and do either an ABORT() or
2665 * possibly process them.
2667 if (sctp_strict_data_order) {
2668 struct mbuf *op_err;
2670 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2671 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0);
2676 /* unknown chunk type, use bit rules */
/* High bit 0x40: report the unrecognized chunk to the peer. */
2677 if (ch->ch.chunk_type & 0x40) {
2678 /* Add a error report to the queue */
2680 struct sctp_paramhdr *phd;
2682 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2684 phd = mtod(merr, struct sctp_paramhdr *);
2686 * We cheat and use param
2687 * type since we did not
2688 * bother to define a error
2689 * cause struct. They are
2690 * the same basic format
2691 * with different names.
2694 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2696 htons(chk_length + sizeof(*phd));
2697 SCTP_BUF_LEN(merr) = sizeof(*phd);
2698 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2699 SCTP_SIZE32(chk_length),
2701 if (SCTP_BUF_NEXT(merr)) {
2702 sctp_queue_op_err(stcb, merr);
/* High bit 0x80 clear: stop processing the rest of the packet. */
2708 if ((ch->ch.chunk_type & 0x80) == 0) {
2709 /* discard the rest of this packet */
2711 } /* else skip this bad chunk and
2714 }; /* switch of chunk type */
2716 *offset += SCTP_SIZE32(chk_length);
2717 if ((*offset >= length) || stop_proc) {
2718 /* no more data left in the mbuf chain */
2722 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2723 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2733 * we need to report rwnd overrun drops.
2735 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2739 * Did we get data, if so update the time for auto-close and
2740 * give peer credit for being alive.
2742 SCTP_STAT_INCR(sctps_recvpktwithdata);
2743 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
2744 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2745 stcb->asoc.overall_error_count,
2747 SCTP_FROM_SCTP_INDATA,
/* Valid data from the peer resets the association error count. */
2750 stcb->asoc.overall_error_count = 0;
2751 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2753 /* now service all of the reassm queue if needed */
2754 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2755 sctp_service_queues(stcb, asoc);
2757 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2758 /* Assure that we ack right away */
2759 stcb->asoc.send_sack = 1;
2761 /* Start a sack timer or QUEUE a SACK for sending */
/* Fast path: no gaps at all (cum-ack == highest and first map byte not
 * fully set) -- send a SACK now if due, else arm the delayed-ack timer. */
2762 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
2763 (stcb->asoc.mapping_array[0] != 0xff)) {
2764 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
2765 (stcb->asoc.delayed_ack == 0) ||
2766 (stcb->asoc.send_sack == 1)) {
2767 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2768 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2770 sctp_send_sack(stcb);
2772 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2773 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2774 stcb->sctp_ep, stcb, NULL);
/* Otherwise let sctp_sack_check() decide (gap present). */
2778 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
/*
 * sctp_handle_segments() -- process the gap-ack blocks of a received
 * SACK.  For each block [frag_strt, frag_end] (offsets relative to
 * last_tsn, the SACK's cumulative ack) it walks the sent_queue marking
 * covered chunks, updating *biggest_tsn_acked / *biggest_newly_acked_tsn
 * / *this_sack_lowest_newack, decreasing flight size for newly-acked
 * chunks, folding ECN nonce sums into *ecn_seg_sums, and maintaining the
 * CMT CUCv2 (rtx-)pseudo-cumack trackers on each destination net.
 *
 * NOTE(review): embedded original line numbers (2787, 2788, ...) show
 * this copy is missing statements and braces (e.g. the declarations of
 * i and j, several closing braces, the RTO-sampling condition around
 * sctp_calculate_rto()).  Restore from upstream sctp_indata.c before use.
 */
2787 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2788 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2789 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2790 int num_seg, int *ecn_seg_sums)
2792 /************************************************/
2793 /* process fragments and update sendqueue */
2794 /************************************************/
2795 struct sctp_sack *sack;
2796 struct sctp_gap_ack_block *frag, block;
2797 struct sctp_tmit_chunk *tp1;
2802 uint16_t frag_strt, frag_end, primary_flag_set;
2803 u_long last_frag_high;
2806 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
2808 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2809 primary_flag_set = 1;
2811 primary_flag_set = 0;
/* Pull the first gap-ack block out of the mbuf chain. */
2815 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2816 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2817 *offset += sizeof(block);
/* Outer loop: one iteration per gap-ack block in the SACK. */
2823 for (i = 0; i < num_seg; i++) {
2824 frag_strt = ntohs(frag->start);
2825 frag_end = ntohs(frag->end);
2826 /* some sanity checks on the fargment offsets */
2827 if (frag_strt > frag_end) {
2828 /* this one is malformed, skip */
2832 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
2834 *biggest_tsn_acked = frag_end + last_tsn;
2836 /* mark acked dgs and find out the highestTSN being acked */
2838 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2840 /* save the locations of the last frags */
2841 last_frag_high = frag_end + last_tsn;
2844 * now lets see if we need to reset the queue due to
2845 * a out-of-order SACK fragment
2847 if (compare_with_wrap(frag_strt + last_tsn,
2848 last_frag_high, MAX_TSN)) {
2850 * if the new frag starts after the last TSN
2851 * frag covered, we are ok and this one is
2852 * beyond the last one
2857 * ok, they have reset us, so we need to
2858 * reset the queue this will cause extra
2859 * hunting but hey, they chose the
2860 * performance hit when they failed to order
2863 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2865 last_frag_high = frag_end + last_tsn;
/* Inner loop: visit every TSN the current gap-ack block covers. */
2867 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2869 if (tp1->rec.data.doing_fast_retransmit)
2873 * CMT: CUCv2 algorithm. For each TSN being
2874 * processed from the sent queue, track the
2875 * next expected pseudo-cumack, or
2876 * rtx_pseudo_cumack, if required. Separate
2877 * cumack trackers for first transmissions,
2878 * and retransmissions.
2880 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2881 (tp1->snd_count == 1)) {
2882 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2883 tp1->whoTo->find_pseudo_cumack = 0;
2885 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2886 (tp1->snd_count > 1)) {
2887 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2888 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk for TSN j: mark it acked by this block. */
2890 if (tp1->rec.data.TSN_seq == j) {
2891 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2893 * must be held until
2897 * ECN Nonce: Add the nonce
2898 * value to the sender's
2901 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2903 * If it is less than RESEND, it is
2904 * now no-longer in flight.
2905 * Higher values may already be set
2906 * via previous Gap Ack Blocks...
2907 * i.e. ACKED or RESEND.
2909 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2910 *biggest_newly_acked_tsn, MAX_TSN)) {
2911 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2920 * this_sack_highest_
2924 if (tp1->rec.data.chunk_was_revoked == 0)
2925 tp1->whoTo->saw_newack = 1;
2927 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2928 tp1->whoTo->this_sack_highest_newack,
2930 tp1->whoTo->this_sack_highest_newack =
2931 tp1->rec.data.TSN_seq;
2936 * this_sack_lowest_n
2939 if (*this_sack_lowest_newack == 0) {
2940 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
2941 sctp_log_sack(*this_sack_lowest_newack,
2943 tp1->rec.data.TSN_seq,
2946 SCTP_LOG_TSN_ACKED);
2948 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2953 * (rtx-)pseudo-cumac
2958 * (rtx-)pseudo-cumac
2960 * new_(rtx_)pseudo_c
2968 * (rtx-)pseudo-cumac
/* A newly acked TSN equal to a destination's (rtx-)pseudo-cumack
 * advances that tracker and signals CUCv2 to find the next one. */
2976 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2977 if (tp1->rec.data.chunk_was_revoked == 0) {
2978 tp1->whoTo->new_pseudo_cumack = 1;
2980 tp1->whoTo->find_pseudo_cumack = 1;
2982 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
2983 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2985 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2986 if (tp1->rec.data.chunk_was_revoked == 0) {
2987 tp1->whoTo->new_pseudo_cumack = 1;
2989 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2991 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
2992 sctp_log_sack(*biggest_newly_acked_tsn,
2994 tp1->rec.data.TSN_seq,
2997 SCTP_LOG_TSN_ACKED);
2999 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
3000 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3001 tp1->whoTo->flight_size,
3003 (uintptr_t) tp1->whoTo,
3004 tp1->rec.data.TSN_seq);
/* Newly gap-acked: remove from per-net and total flight. */
3006 sctp_flight_size_decrease(tp1);
3007 sctp_total_flight_decrease(stcb, tp1);
3009 tp1->whoTo->net_ack += tp1->send_size;
3010 if (tp1->snd_count < 2) {
3016 tp1->whoTo->net_ack2 += tp1->send_size;
3023 sctp_calculate_rto(stcb,
3026 &tp1->sent_rcv_time,
3027 sctp_align_safe_nocopy);
3032 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3033 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3034 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3035 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3036 asoc->this_sack_highest_gap,
3038 asoc->this_sack_highest_gap =
3039 tp1->rec.data.TSN_seq;
3041 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3042 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3043 #ifdef SCTP_AUDITING_ENABLED
3044 sctp_audit_log(0xB2,
3045 (asoc->sent_queue_retran_cnt & 0x000000ff));
3050 * All chunks NOT UNSENT
3051 * fall through here and are
3054 tp1->sent = SCTP_DATAGRAM_MARKED;
3055 if (tp1->rec.data.chunk_was_revoked) {
3056 /* deflate the cwnd */
3057 tp1->whoTo->cwnd -= tp1->book_size;
3058 tp1->rec.data.chunk_was_revoked = 0;
3062 } /* if (tp1->TSN_seq == j) */
3063 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
3067 tp1 = TAILQ_NEXT(tp1, sctp_next);
3068 } /* end while (tp1) */
3069 } /* end for (j = fragStart */
/* Advance to the next gap-ack block in the chunk. */
3070 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3071 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3072 *offset += sizeof(block);
3077 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3079 sctp_log_fr(*biggest_tsn_acked,
3080 *biggest_newly_acked_tsn,
3081 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
/*
 * sctp_check_for_revoked() -- after processing a SACK, scan the
 * sent_queue for chunks above the cumulative ack.  A chunk still in
 * state ACKED (acked by an earlier SACK's gap report) that this SACK no
 * longer covers has been "revoked" by the peer: put it back to SENT,
 * re-add it to the flight size and inflate cwnd to compensate.  Chunks
 * re-acked this round (MARKED) are promoted to ACKED.  If anything was
 * revoked, reset the ECN nonce re-sync point.
 *
 * NOTE(review): embedded original line numbers (3086, 3087, ...) are
 * non-contiguous -- loop braces and the tot_revoked increment are among
 * the missing lines.  Restore from upstream sctp_indata.c before use.
 */
3086 sctp_check_for_revoked(struct sctp_tcb *stcb,
3087 struct sctp_association *asoc, uint32_t cumack,
3088 u_long biggest_tsn_acked)
3090 struct sctp_tmit_chunk *tp1;
3091 int tot_revoked = 0;
3093 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only chunks strictly above the cumulative ack can be revoked. */
3095 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3098 * ok this guy is either ACK or MARKED. If it is
3099 * ACKED it has been previously acked but not this
3100 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Past the biggest TSN this SACK acked: nothing further to check. */
3103 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3108 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3109 /* it has been revoked */
3110 tp1->sent = SCTP_DATAGRAM_SENT;
3111 tp1->rec.data.chunk_was_revoked = 1;
3113 * We must add this stuff back in to assure
3114 * timers and such get started.
3116 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
3117 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3118 tp1->whoTo->flight_size,
3120 (uintptr_t) tp1->whoTo,
3121 tp1->rec.data.TSN_seq);
3123 sctp_flight_size_increase(tp1);
3124 sctp_total_flight_increase(stcb, tp1);
3126 * We inflate the cwnd to compensate for our
3127 * artificial inflation of the flight_size.
3129 tp1->whoTo->cwnd += tp1->book_size;
3131 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
3132 sctp_log_sack(asoc->last_acked_seq,
3134 tp1->rec.data.TSN_seq,
3137 SCTP_LOG_TSN_REVOKED);
3139 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3140 /* it has been re-acked in this SACK */
3141 tp1->sent = SCTP_DATAGRAM_ACKED;
3144 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3146 tp1 = TAILQ_NEXT(tp1, sctp_next);
3148 if (tot_revoked > 0) {
3150 * Setup the ecn nonce re-sync point. We do this since once
3151 * data is revoked we begin to retransmit things, which do
3152 * NOT have the ECN bits set. This means we are now out of
3153 * sync and must wait until we get back in sync with the
3154 * peer to check ECN bits.
3156 tp1 = TAILQ_FIRST(&asoc->send_queue);
3158 asoc->nonce_resync_tsn = asoc->sending_seq;
3160 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3162 asoc->nonce_wait_for_ecne = 0;
3163 asoc->nonce_sum_check = 0;
/*
 * sctp_strike_gap_ack_chunks() -- apply fast-retransmit "strike" logic
 * after SACK gap processing.  Walks the sent_queue: drops expired
 * PR-SCTP chunks (TTL- or RTX-limited), then decides per chunk whether
 * to add a strike, honoring CMT's SFR rules (skip if the destination saw
 * no new acks, or if the TSN is above the dest's this_sack_highest_newack),
 * the CMT DAC extra-strike rule for mixed SACKs, HTNA (don't strike
 * above biggest_tsn_newly_acked), and fast-recovery / prior-FR guards.
 * Chunks that reach SCTP_DATAGRAM_RESEND are queued for retransmission:
 * an alternate destination is chosen (CMT RTX_SSTHRESH/PF policies, or
 * the same/alternate net when CMT is off), flight size and peer rwnd are
 * adjusted, and fast_retran_tsn is recorded for subsequent-FR detection.
 * If anything was marked for retransmit, the ECN nonce re-sync point is
 * set (Karn's rule makes the nonce sums unknowable).
 *
 * NOTE(review): embedded original line numbers (3168, 3169, ...) are
 * non-contiguous -- declarations (e.g. now), strike increments, braces
 * and several else-arms are missing from this copy.  Restore the
 * function from upstream sctp_indata.c before building or editing.
 */
3168 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3169 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3171 struct sctp_tmit_chunk *tp1;
3172 int strike_flag = 0;
3174 int tot_retrans = 0;
3175 uint32_t sending_seq;
3176 struct sctp_nets *net;
3177 int num_dests_sacked = 0;
3180 * select the sending_seq, this is either the next thing ready to be
3181 * sent but not transmitted, OR, the next seq we assign.
3183 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3185 sending_seq = asoc->sending_seq;
3187 sending_seq = tp1->rec.data.TSN_seq;
3190 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3191 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3192 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3193 if (net->saw_newack)
3197 if (stcb->asoc.peer_supports_prsctp) {
3198 (void)SCTP_GETTIME_TIMEVAL(&now);
/* Main scan over every outstanding chunk. */
3200 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3203 if (tp1->no_fr_allowed) {
3204 /* this one had a timeout or something */
3205 tp1 = TAILQ_NEXT(tp1, sctp_next);
3208 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3209 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3210 sctp_log_fr(biggest_tsn_newly_acked,
3211 tp1->rec.data.TSN_seq,
3213 SCTP_FR_LOG_CHECK_STRIKE);
3215 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3217 tp1->sent == SCTP_DATAGRAM_UNSENT) {
/* PR-SCTP: drop chunks whose lifetime or retransmit budget is spent. */
3221 if (stcb->asoc.peer_supports_prsctp) {
3222 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3223 /* Is it expired? */
3225 (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3227 /* Yes so drop it */
3228 if (tp1->data != NULL) {
3229 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3230 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3231 &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3233 tp1 = TAILQ_NEXT(tp1, sctp_next);
3237 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3238 /* Has it been retransmitted tv_sec times? */
3239 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3240 /* Yes, so drop it */
3241 if (tp1->data != NULL) {
3242 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3243 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3244 &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3246 tp1 = TAILQ_NEXT(tp1, sctp_next);
3251 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3252 asoc->this_sack_highest_gap, MAX_TSN)) {
3253 /* we are beyond the tsn in the sack */
3256 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3257 /* either a RESEND, ACKED, or MARKED */
3259 tp1 = TAILQ_NEXT(tp1, sctp_next);
3263 * CMT : SFR algo (covers part of DAC and HTNA as well)
3265 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3267 * No new acks were receieved for data sent to this
3268 * dest. Therefore, according to the SFR algo for
3269 * CMT, no data sent to this dest can be marked for
3270 * FR using this SACK.
3272 tp1 = TAILQ_NEXT(tp1, sctp_next);
3274 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3275 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3277 * CMT: New acks were receieved for data sent to
3278 * this dest. But no new acks were seen for data
3279 * sent after tp1. Therefore, according to the SFR
3280 * algo for CMT, tp1 cannot be marked for FR using
3281 * this SACK. This step covers part of the DAC algo
3282 * and the HTNA algo as well.
3284 tp1 = TAILQ_NEXT(tp1, sctp_next);
3288 * Here we check to see if we were have already done a FR
3289 * and if so we see if the biggest TSN we saw in the sack is
3290 * smaller than the recovery point. If so we don't strike
3291 * the tsn... otherwise we CAN strike the TSN.
3294 * @@@ JRI: Check for CMT if (accum_moved &&
3295 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3298 if (accum_moved && asoc->fast_retran_loss_recovery) {
3300 * Strike the TSN if in fast-recovery and cum-ack
3303 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3304 sctp_log_fr(biggest_tsn_newly_acked,
3305 tp1->rec.data.TSN_seq,
3307 SCTP_FR_LOG_STRIKE_CHUNK);
3309 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3312 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3314 * CMT DAC algorithm: If SACK flag is set to
3315 * 0, then lowest_newack test will not pass
3316 * because it would have been set to the
3317 * cumack earlier. If not already to be
3318 * rtx'd, If not a mixed sack and if tp1 is
3319 * not between two sacked TSNs, then mark by
3320 * one more. NOTE that we are marking by one
3321 * additional time since the SACK DAC flag
3322 * indicates that two packets have been
3323 * received after this missing TSN.
3325 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3326 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3327 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3328 sctp_log_fr(16 + num_dests_sacked,
3329 tp1->rec.data.TSN_seq,
3331 SCTP_FR_LOG_STRIKE_CHUNK);
3336 } else if (tp1->rec.data.doing_fast_retransmit) {
3338 * For those that have done a FR we must take
3339 * special consideration if we strike. I.e the
3340 * biggest_newly_acked must be higher than the
3341 * sending_seq at the time we did the FR.
3344 #ifdef SCTP_FR_TO_ALTERNATE
3346 * If FR's go to new networks, then we must only do
3347 * this for singly homed asoc's. However if the FR's
3348 * go to the same network (Armando's work) then its
3349 * ok to FR multiple times.
3357 if ((compare_with_wrap(biggest_tsn_newly_acked,
3358 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3359 (biggest_tsn_newly_acked ==
3360 tp1->rec.data.fast_retran_tsn)) {
3362 * Strike the TSN, since this ack is
3363 * beyond where things were when we
3366 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3367 sctp_log_fr(biggest_tsn_newly_acked,
3368 tp1->rec.data.TSN_seq,
3370 SCTP_FR_LOG_STRIKE_CHUNK);
3372 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3376 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3378 * CMT DAC algorithm: If
3379 * SACK flag is set to 0,
3380 * then lowest_newack test
3381 * will not pass because it
3382 * would have been set to
3383 * the cumack earlier. If
3384 * not already to be rtx'd,
3385 * If not a mixed sack and
3386 * if tp1 is not between two
3387 * sacked TSNs, then mark by
3388 * one more. NOTE that we
3389 * are marking by one
3390 * additional time since the
3391 * SACK DAC flag indicates
3392 * that two packets have
3393 * been received after this
3396 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3397 (num_dests_sacked == 1) &&
3398 compare_with_wrap(this_sack_lowest_newack,
3399 tp1->rec.data.TSN_seq, MAX_TSN)) {
3400 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3401 sctp_log_fr(32 + num_dests_sacked,
3402 tp1->rec.data.TSN_seq,
3404 SCTP_FR_LOG_STRIKE_CHUNK);
3406 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3414 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3417 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3418 biggest_tsn_newly_acked, MAX_TSN)) {
3420 * We don't strike these: This is the HTNA
3421 * algorithm i.e. we don't strike If our TSN is
3422 * larger than the Highest TSN Newly Acked.
3426 /* Strike the TSN */
3427 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3428 sctp_log_fr(biggest_tsn_newly_acked,
3429 tp1->rec.data.TSN_seq,
3431 SCTP_FR_LOG_STRIKE_CHUNK);
3433 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3436 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3438 * CMT DAC algorithm: If SACK flag is set to
3439 * 0, then lowest_newack test will not pass
3440 * because it would have been set to the
3441 * cumack earlier. If not already to be
3442 * rtx'd, If not a mixed sack and if tp1 is
3443 * not between two sacked TSNs, then mark by
3444 * one more. NOTE that we are marking by one
3445 * additional time since the SACK DAC flag
3446 * indicates that two packets have been
3447 * received after this missing TSN.
3449 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3450 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3451 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3452 sctp_log_fr(48 + num_dests_sacked,
3453 tp1->rec.data.TSN_seq,
3455 SCTP_FR_LOG_STRIKE_CHUNK);
/* Strike threshold reached: queue this chunk for fast retransmit. */
3461 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3462 /* Increment the count to resend */
3463 struct sctp_nets *alt;
3465 /* printf("OK, we are now ready to FR this guy\n"); */
3466 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
3467 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3471 /* This is a subsequent FR */
3472 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3474 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3475 if (sctp_cmt_on_off) {
3477 * CMT: Using RTX_SSTHRESH policy for CMT.
3478 * If CMT is being used, then pick dest with
3479 * largest ssthresh for any retransmission.
3481 tp1->no_fr_allowed = 1;
3483 /* sa_ignore NO_NULL_CHK */
3484 if (sctp_cmt_on_off && sctp_cmt_pf) {
3486 * JRS 5/18/07 - If CMT PF is on,
3487 * use the PF version of
3490 alt = sctp_find_alternate_net(stcb, alt, 2);
3493 * JRS 5/18/07 - If only CMT is on,
3494 * use the CMT version of
3497 /* sa_ignore NO_NULL_CHK */
3498 alt = sctp_find_alternate_net(stcb, alt, 1);
3504 * CUCv2: If a different dest is picked for
3505 * the retransmission, then new
3506 * (rtx-)pseudo_cumack needs to be tracked
3507 * for orig dest. Let CUCv2 track new (rtx-)
3508 * pseudo-cumack always.
3511 tp1->whoTo->find_pseudo_cumack = 1;
3512 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3514 } else {/* CMT is OFF */
3516 #ifdef SCTP_FR_TO_ALTERNATE
3517 /* Can we find an alternate? */
3518 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3521 * default behavior is to NOT retransmit
3522 * FR's to an alternate. Armando Caro's
3523 * paper details why.
3529 tp1->rec.data.doing_fast_retransmit = 1;
3531 /* mark the sending seq for possible subsequent FR's */
3533 * printf("Marking TSN for FR new value %x\n",
3534 * (uint32_t)tpi->rec.data.TSN_seq);
3536 if (TAILQ_EMPTY(&asoc->send_queue)) {
3538 * If the queue of send is empty then its
3539 * the next sequence number that will be
3540 * assigned so we subtract one from this to
3541 * get the one we last sent.
3543 tp1->rec.data.fast_retran_tsn = sending_seq;
3546 * If there are chunks on the send queue
3547 * (unsent data that has made it from the
3548 * stream queues but not out the door, we
3549 * take the first one (which will have the
3550 * lowest TSN) and subtract one to get the
3553 struct sctp_tmit_chunk *ttt;
3555 ttt = TAILQ_FIRST(&asoc->send_queue);
3556 tp1->rec.data.fast_retran_tsn =
3557 ttt->rec.data.TSN_seq;
3562 * this guy had a RTO calculation pending on
3567 /* fix counts and things */
3568 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
3569 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3570 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3572 (uintptr_t) tp1->whoTo,
3573 tp1->rec.data.TSN_seq);
3576 tp1->whoTo->net_ack++;
/* Retransmit-bound data leaves the flight and is credited back
 * to the peer's receive window. */
3577 sctp_flight_size_decrease(tp1);
3579 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
3580 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3581 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
3583 /* add back to the rwnd */
3584 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3586 /* remove from the total flight */
3587 sctp_total_flight_decrease(stcb, tp1);
3588 if (alt != tp1->whoTo) {
3589 /* yes, there is an alternate. */
3590 sctp_free_remote_addr(tp1->whoTo);
3591 /* sa_ignore FREED_MEMORY */
3593 atomic_add_int(&alt->ref_count, 1);
3596 tp1 = TAILQ_NEXT(tp1, sctp_next);
3599 if (tot_retrans > 0) {
3601 * Setup the ecn nonce re-sync point. We do this since once
3602 * we go to FR something we introduce a Karn's rule scenario
3603 * and won't know the totals for the ECN bits.
3605 asoc->nonce_resync_tsn = sending_seq;
3606 asoc->nonce_wait_for_ecne = 0;
3607 asoc->nonce_sum_check = 0;
/*
 * sctp_try_advance_peer_ack_point() -- PR-SCTP helper.
 *
 * Walks the head of asoc->sent_queue and tries to advance
 * asoc->advanced_peer_ack_point past chunks that are marked
 * SCTP_FORWARD_TSN_SKIP, or that are TTL-policy resends whose
 * time-to-drop has expired, releasing their data and notifying the
 * ULP.  Presumably returns the last chunk advanced over (a_adv) so
 * the caller can build a FORWARD-TSN -- the return statement is not
 * visible in this listing; TODO confirm against the full source.
 *
 * NOTE(review): this listing is decimated -- original line numbers are
 * embedded at the start of each line and several statements (braces,
 * loop headers, returns) are missing.  Comments describe only what the
 * surviving lines show.
 */
3611 struct sctp_tmit_chunk *
3612 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3613 struct sctp_association *asoc)
3615 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Peer never negotiated PR-SCTP: nothing can be skipped. */
3619 if (asoc->peer_supports_prsctp == 0) {
3622 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3624 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3625 tp1->sent != SCTP_DATAGRAM_RESEND) {
3626 /* no chance to advance, out of here */
3629 if (!PR_SCTP_ENABLED(tp1->flags)) {
3631 * We can't fwd-tsn past any that are reliable aka
3632 * retransmitted until the asoc fails.
/* Snapshot current time once for the TTL-expiry checks below. */
3637 (void)SCTP_GETTIME_TIMEVAL(&now);
3640 tp2 = TAILQ_NEXT(tp1, sctp_next);
3642 * now we got a chunk which is marked for another
3643 * retransmission to a PR-stream but has run out its chances
3644 * already maybe OR has been marked to skip now. Can we skip
3645 * it if its a resend?
3647 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3648 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3650 * Now is this one marked for resend and its time is
3653 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3654 /* Yes so drop it */
3656 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3657 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3658 &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3662 * No, we are done when hit one for resend
3663 * whos time as not expired.
3669 * Ok now if this chunk is marked to drop it we can clean up
3670 * the chunk, advance our peer ack point and we can check
3673 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3674 /* advance PeerAckPoint goes forward */
3675 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3678 * we don't want to de-queue it here. Just wait for
3679 * the next peer SACK to come with a new cumTSN and
3680 * then the chunk will be droped in the normal
/* Release the chunk's buffer accounting and tell the ULP it failed. */
3684 sctp_free_bufspace(stcb, asoc, tp1, 1);
3686 * Maybe there should be another
3689 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3690 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3691 tp1, SCTP_SO_NOT_LOCKED);
3692 sctp_m_freem(tp1->data);
/*
 * Wake any writer blocked on the send socket buffer.  On Apple /
 * lock-testing builds the socket lock must be taken, which requires
 * dropping and re-taking the TCB lock; the refcnt bump keeps the
 * stcb alive across the unlocked window, and a CLOSED_SOCKET state
 * afterwards means the assoc was freed meanwhile.
 */
3694 if (stcb->sctp_socket) {
3695 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3698 so = SCTP_INP_SO(stcb->sctp_ep);
3699 atomic_add_int(&stcb->asoc.refcnt, 1);
3700 SCTP_TCB_UNLOCK(stcb);
3701 SCTP_SOCKET_LOCK(so, 1);
3702 SCTP_TCB_LOCK(stcb);
3703 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3704 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3706 * assoc was freed while we
3709 SCTP_SOCKET_UNLOCK(so, 1);
3713 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3714 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3715 SCTP_SOCKET_UNLOCK(so, 1);
3717 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
3718 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
3724 * If it is still in RESEND we can advance no
3730 * If we hit here we just dumped tp1, move to next tsn on
/*
 * sctp_fs_audit() -- debug audit of the sent_queue's flight-size
 * bookkeeping.  Classifies each chunk by its ->sent state relative to
 * SCTP_DATAGRAM_RESEND / SCTP_DATAGRAM_ACKED and complains (panic under
 * INVARIANTS-style builds, otherwise SCTP_PRINTF) if anything is still
 * counted as in-flight or in-between.
 *
 * NOTE(review): the listing is decimated -- the return-type line, the
 * per-branch counter increments (presumably inflight++/resend++/
 * inbetween++/acked++/above++), and the closing braces are missing.
 * Confirm against the full source before relying on these comments.
 */
3739 sctp_fs_audit(struct sctp_association *asoc)
3741 struct sctp_tmit_chunk *chk;
3742 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
/* Bucket every sent-queue chunk by its retransmission/ack state. */
3744 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3745 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3747 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3749 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3751 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still in flight (or between RESEND and ACKED) is a bug. */
3758 if ((inflight > 0) || (inbetween > 0)) {
3760 panic("Flight size-express incorrect? \n");
3762 SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n",
3763 inflight, inbetween);
/*
 * sctp_window_probe_recovery() -- the peer's window re-opened after a
 * zero-window probe.  Move the probe chunk tp1 (and every chunk still
 * marked SCTP_DATAGRAM_RESEND) off the sent_queue back onto the
 * send_queue as UNSENT, fixing flight-size and queue-count accounting
 * so the chunks will be transmitted normally again.
 *
 * NOTE(review): decimated listing -- some lines (e.g. closing braces)
 * are missing; comments describe only what survives.
 */
3770 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3771 struct sctp_association *asoc,
3772 struct sctp_nets *net,
3773 struct sctp_tmit_chunk *tp1)
3775 struct sctp_tmit_chunk *chk;
3777 /* First setup this one and get it moved back */
3778 tp1->sent = SCTP_DATAGRAM_UNSENT;
3779 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
3780 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3781 tp1->whoTo->flight_size,
3783 (uintptr_t) tp1->whoTo,
3784 tp1->rec.data.TSN_seq);
/* tp1 is no longer in flight: shrink per-net and total flight sizes. */
3786 sctp_flight_size_decrease(tp1);
3787 sctp_total_flight_decrease(stcb, tp1);
/* Re-queue tp1 at the head of the send queue and fix the counts. */
3788 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3789 TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
3790 asoc->sent_queue_cnt--;
3791 asoc->send_queue_cnt++;
3793 * Now all guys marked for RESEND on the sent_queue must be moved
/*
 * NOTE(review): TAILQ_REMOVE(chk) inside a plain TAILQ_FOREACH is
 * unsafe if iteration continues past the removed element -- later
 * SCTP stacks use TAILQ_FOREACH_SAFE here.  Confirm against the
 * full source whether the (not visible) loop tail restarts or exits.
 */
3796 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3797 if (chk->sent == SCTP_DATAGRAM_RESEND) {
3798 /* Another chunk to move */
3799 chk->sent = SCTP_DATAGRAM_UNSENT;
3800 /* It should not be in flight */
3801 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3802 TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next);
3803 asoc->sent_queue_cnt--;
3804 asoc->send_queue_cnt++;
3805 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
/*
 * sctp_express_handle_sack() -- fast-path SACK processing for the
 * common case of a SACK carrying only a cumulative ack (no gap-ack
 * blocks, no duplicates).  Frees newly-acked chunks, updates the peer
 * rwnd, runs the CC module's post-SACK hook, maintains ECN-nonce
 * state, restarts/stops T3 timers per destination, and drives the
 * shutdown state machine when the send queues drain.  On a fatal
 * protocol violation it aborts the association and sets *abort_now.
 *
 * NOTE(review): decimated listing -- original line numbers are embedded
 * in each line and many statements (braces, some if-conditions, local
 * declarations such as old_rwnd/send_s/oper) are missing.  Comments
 * describe only what the surviving lines show; confirm details against
 * the full source.
 */
3811 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3812 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3814 struct sctp_nets *net;
3815 struct sctp_association *asoc;
3816 struct sctp_tmit_chunk *tp1, *tp2;
3818 int win_probe_recovery = 0;
3819 int win_probe_recovered = 0;
3820 int j, done_once = 0;
/* Optional trace of SACK arrivals. */
3822 if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3823 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3824 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3826 SCTP_TCB_LOCK_ASSERT(stcb);
/* Debug builds keep a ring buffer of received cum-acks. */
3827 #ifdef SCTP_ASOCLOG_OF_TSNS
3828 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3829 stcb->asoc.cumack_log_at++;
3830 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3831 stcb->asoc.cumack_log_at = 0;
3835 old_rwnd = asoc->peers_rwnd;
/* Old cum-ack: ignore.  Duplicate cum-ack: only a window update. */
3836 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3839 } else if (asoc->last_acked_seq == cumack) {
3840 /* Window update sack */
3841 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3842 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
3843 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3844 /* SWS sender side engages */
3845 asoc->peers_rwnd = 0;
3847 if (asoc->peers_rwnd > old_rwnd) {
3852 /* First setup for CC stuff */
3853 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3854 net->prev_cwnd = net->cwnd;
3859 * CMT: Reset CUC and Fast recovery algo variables before
3862 net->new_pseudo_cumack = 0;
3863 net->will_exit_fast_recovery = 0;
/*
 * Strict-SACK validation: if the peer acks a TSN at or beyond our
 * next sending_seq it never received it from us -- abort the assoc
 * with a PROTOCOL_VIOLATION cause (SCTP_LOC_25).
 */
3865 if (sctp_strict_sacks) {
3868 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3869 tp1 = TAILQ_LAST(&asoc->sent_queue,
3870 sctpchunk_listhead);
3871 send_s = tp1->rec.data.TSN_seq + 1;
3873 send_s = asoc->sending_seq;
3875 if ((cumack == send_s) ||
3876 compare_with_wrap(cumack, send_s, MAX_TSN)) {
3882 panic("Impossible sack 1");
3886 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3887 0, M_DONTWAIT, 1, MT_DATA);
3889 struct sctp_paramhdr *ph;
3892 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3894 ph = mtod(oper, struct sctp_paramhdr *);
3895 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3896 ph->param_length = htons(SCTP_BUF_LEN(oper));
3897 ippp = (uint32_t *) (ph + 1);
3898 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3900 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3901 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3906 asoc->this_sack_highest_gap = cumack;
/* Any valid SACK clears the association's error counter. */
3907 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
3908 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3909 stcb->asoc.overall_error_count,
3911 SCTP_FROM_SCTP_INDATA,
3914 stcb->asoc.overall_error_count = 0;
/*
 * Cum-ack advanced: walk the sent_queue freeing every chunk whose
 * TSN is at or below the new cum-ack, updating flight size, RTO,
 * CMT pseudo-cumack state and ECN-nonce sums as we go.  tp2 is
 * grabbed before tp1 is freed so the walk is safe.
 */
3915 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3916 /* process the new consecutive TSN first */
3917 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3919 tp2 = TAILQ_NEXT(tp1, sctp_next);
3920 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3922 cumack == tp1->rec.data.TSN_seq) {
3923 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3924 printf("Warning, an unsent is now acked?\n");
3927 * ECN Nonce: Add the nonce to the sender's
3930 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3931 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3933 * If it is less than ACKED, it is
3934 * now no-longer in flight. Higher
3935 * values may occur during marking
3937 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3938 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
3939 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3940 tp1->whoTo->flight_size,
3942 (uintptr_t) tp1->whoTo,
3943 tp1->rec.data.TSN_seq);
3945 sctp_flight_size_decrease(tp1);
3946 sctp_total_flight_decrease(stcb, tp1);
3948 tp1->whoTo->net_ack += tp1->send_size;
3949 if (tp1->snd_count < 2) {
3951 * True non-retransmited
3954 tp1->whoTo->net_ack2 +=
3957 /* update RTO too? */
3960 sctp_calculate_rto(stcb,
3962 &tp1->sent_rcv_time,
3963 sctp_align_safe_nocopy);
3968 * CMT: CUCv2 algorithm. From the
3969 * cumack'd TSNs, for each TSN being
3970 * acked for the first time, set the
3971 * following variables for the
3972 * corresp destination.
3973 * new_pseudo_cumack will trigger a
3975 * find_(rtx_)pseudo_cumack will
3976 * trigger search for the next
3977 * expected (rtx-)pseudo-cumack.
3979 tp1->whoTo->new_pseudo_cumack = 1;
3980 tp1->whoTo->find_pseudo_cumack = 1;
3981 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3983 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
3984 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3987 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3988 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3990 if (tp1->rec.data.chunk_was_revoked) {
3991 /* deflate the cwnd */
3992 tp1->whoTo->cwnd -= tp1->book_size;
3993 tp1->rec.data.chunk_was_revoked = 0;
3995 tp1->sent = SCTP_DATAGRAM_ACKED;
3996 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3998 sctp_free_bufspace(stcb, asoc, tp1, 1);
3999 sctp_m_freem(tp1->data);
4001 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
4002 sctp_log_sack(asoc->last_acked_seq,
4004 tp1->rec.data.TSN_seq,
4007 SCTP_LOG_FREE_SENT);
4010 asoc->sent_queue_cnt--;
4011 sctp_free_a_chunk(stcb, tp1);
/*
 * Freed send-buffer space: wake the writing socket.  Apple /
 * lock-testing builds must take the socket lock, dropping and
 * re-taking the TCB lock around it (refcnt keeps stcb alive; a
 * CLOSED_SOCKET state afterwards means the assoc went away).
 */
4019 if (stcb->sctp_socket) {
4020 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4025 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4026 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
4027 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4029 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4030 so = SCTP_INP_SO(stcb->sctp_ep);
4031 atomic_add_int(&stcb->asoc.refcnt, 1);
4032 SCTP_TCB_UNLOCK(stcb);
4033 SCTP_SOCKET_LOCK(so, 1);
4034 SCTP_TCB_LOCK(stcb);
4035 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4036 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4037 /* assoc was freed while we were unlocked */
4038 SCTP_SOCKET_UNLOCK(so, 1);
4042 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4043 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4044 SCTP_SOCKET_UNLOCK(so, 1);
4047 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
4048 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4052 /* JRS - Use the congestion control given in the CC module */
4053 if (asoc->last_acked_seq != cumack)
4054 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4056 asoc->last_acked_seq = cumack;
/* Sent queue empty: zero all flight-size accounting. */
4058 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4059 /* nothing left in-flight */
4060 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4061 net->flight_size = 0;
4062 net->partial_bytes_acked = 0;
4064 asoc->total_flight = 0;
4065 asoc->total_flight_count = 0;
4067 /* Fix up the a-p-a-p for future PR-SCTP sends */
4068 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4069 asoc->advanced_peer_ack_point = cumack;
4071 /* ECN Nonce updates */
4072 if (asoc->ecn_nonce_allowed) {
4073 if (asoc->nonce_sum_check) {
/*
 * Nonce mismatch: give the peer until nonce_wait_tsn is acked to
 * produce an ECNE before declaring it a misbehaver and disabling
 * ECN.  NOTE(review): the lchk NULL-check branch appears to be
 * missing from this listing -- confirm against the full source.
 */
4074 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4075 if (asoc->nonce_wait_for_ecne == 0) {
4076 struct sctp_tmit_chunk *lchk;
4078 lchk = TAILQ_FIRST(&asoc->send_queue);
4079 asoc->nonce_wait_for_ecne = 1;
4081 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4083 asoc->nonce_wait_tsn = asoc->sending_seq;
4086 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4087 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4089 * Misbehaving peer. We need
4090 * to react to this guy
4092 asoc->ecn_allowed = 0;
4093 asoc->ecn_nonce_allowed = 0;
4098 /* See if Resynchronization Possible */
4099 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4100 asoc->nonce_sum_check = 1;
4102 * now we must calculate what the base is.
4103 * We do this based on two things, we know
4104 * the total's for all the segments
4105 * gap-acked in the SACK (none), We also
4106 * know the SACK's nonce sum, its in
4107 * nonce_sum_flag. So we can build a truth
4108 * table to back-calculate the new value of
4109 * asoc->nonce_sum_expect_base:
4111 * SACK-flag-Value Seg-Sums Base 0 0 0
4115 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute the peer rwnd; an opening window triggers probe recovery. */
4120 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4121 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4122 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4123 /* SWS sender side engages */
4124 asoc->peers_rwnd = 0;
4126 if (asoc->peers_rwnd > old_rwnd) {
4127 win_probe_recovery = 1;
4129 /* Now assure a timer where data is queued at */
/*
 * Per destination: recover window-probe chunks if the window opened,
 * then (re)start the T3-rxt timer when data is still in flight or
 * stop it (and any early-FR timer) when none is.
 */
4132 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4133 if (win_probe_recovery && (net->window_probe)) {
4134 net->window_probe = 0;
4135 win_probe_recovered = 1;
4137 * Find first chunk that was used with window probe
4138 * and clear the sent
4140 /* sa_ignore FREED_MEMORY */
4141 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4142 if (tp1->window_probe) {
4143 /* move back to data send queue */
4144 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4149 if (net->flight_size) {
4152 if (net->RTO == 0) {
4153 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4155 to_ticks = MSEC_TO_TICKS(net->RTO);
4158 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4159 sctp_timeout_handler, &net->rxt_timer);
4161 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4162 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4164 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4166 if (sctp_early_fr) {
4167 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4168 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4169 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4170 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * Flight-size sanity repair: audit and rebuild flight/retran counts
 * from the sent_queue.  NOTE(review): the opening clause(s) of this
 * if-condition are missing from the listing (presumably a
 * total_flight / done_once guard) -- confirm against the full source.
 */
4176 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4177 (asoc->sent_queue_retran_cnt == 0) &&
4178 (win_probe_recovered == 0) &&
4180 /* huh, this should not happen */
4181 sctp_fs_audit(asoc);
4182 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4183 net->flight_size = 0;
4185 asoc->total_flight = 0;
4186 asoc->total_flight_count = 0;
4187 asoc->sent_queue_retran_cnt = 0;
4188 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4189 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4190 sctp_flight_size_increase(tp1);
4191 sctp_total_flight_increase(stcb, tp1);
4192 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4193 asoc->sent_queue_retran_cnt++;
4199 /**********************************/
4200 /* Now what about shutdown issues */
4201 /**********************************/
4202 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4203 /* nothing left on sendqueue.. consider done */
/*
 * Shutdown machinery: with the send side drained, either abort (user
 * never finished a partially-sent message: USER_INITIATED_ABT with
 * SCTP_LOC_24), send SHUTDOWN (pending), or send SHUTDOWN-ACK
 * (shutdown received) and start the matching timers.
 */
4205 if ((asoc->stream_queue_cnt == 1) &&
4206 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4207 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4208 (asoc->locked_on_sending)
4210 struct sctp_stream_queue_pending *sp;
4213 * I may be in a state where we got all across.. but
4214 * cannot write more due to a shutdown... we abort
4215 * since the user did not indicate EOR in this case.
4216 * The sp will be cleaned during free of the asoc.
4218 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4220 if ((sp) && (sp->length == 0)) {
4221 /* Let cleanup code purge it */
4222 if (sp->msg_is_complete) {
4223 asoc->stream_queue_cnt--;
4225 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4226 asoc->locked_on_sending = NULL;
4227 asoc->stream_queue_cnt--;
4231 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4232 (asoc->stream_queue_cnt == 0)) {
4233 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4234 /* Need to abort here */
4240 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4241 0, M_DONTWAIT, 1, MT_DATA);
4243 struct sctp_paramhdr *ph;
4246 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4248 ph = mtod(oper, struct sctp_paramhdr *);
4249 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4250 ph->param_length = htons(SCTP_BUF_LEN(oper));
4251 ippp = (uint32_t *) (ph + 1);
4252 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4254 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4255 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4257 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4258 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4259 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4261 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4262 sctp_stop_timers_for_shutdown(stcb);
4263 sctp_send_shutdown(stcb,
4264 stcb->asoc.primary_destination);
4265 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4266 stcb->sctp_ep, stcb, asoc->primary_destination);
4267 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4268 stcb->sctp_ep, stcb, asoc->primary_destination);
4270 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4271 (asoc->stream_queue_cnt == 0)) {
4272 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4275 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4276 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4277 sctp_send_shutdown_ack(stcb,
4278 stcb->asoc.primary_destination);
4280 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4281 stcb->sctp_ep, stcb, asoc->primary_destination);
/* Final rwnd/flight trace before returning. */
4284 if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) {
4285 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4287 stcb->asoc.peers_rwnd,
4288 stcb->asoc.total_flight,
4289 stcb->asoc.total_output_queue_size);
4294 sctp_handle_sack(struct mbuf *m, int offset,
4295 struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4296 struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4298 struct sctp_association *asoc;
4299 struct sctp_sack *sack;
4300 struct sctp_tmit_chunk *tp1, *tp2;
4301 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4302 this_sack_lowest_newack;
4303 uint32_t sav_cum_ack;
4304 uint16_t num_seg, num_dup;
4305 uint16_t wake_him = 0;
4306 unsigned int sack_length;
4307 uint32_t send_s = 0;
4309 int accum_moved = 0;
4310 int will_exit_fast_recovery = 0;
4311 uint32_t a_rwnd, old_rwnd;
4312 int win_probe_recovery = 0;
4313 int win_probe_recovered = 0;
4314 struct sctp_nets *net = NULL;
4315 int nonce_sum_flag, ecn_seg_sums = 0;
4317 uint8_t reneged_all = 0;
4318 uint8_t cmt_dac_flag;
4321 * we take any chance we can to service our queues since we cannot
4322 * get awoken when the socket is read from :<
4325 * Now perform the actual SACK handling: 1) Verify that it is not an
4326 * old sack, if so discard. 2) If there is nothing left in the send
4327 * queue (cum-ack is equal to last acked) then you have a duplicate
4328 * too, update any rwnd change and verify no timers are running.
4329 * then return. 3) Process any new consequtive data i.e. cum-ack
4330 * moved process these first and note that it moved. 4) Process any
4331 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4332 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4333 * sync up flightsizes and things, stop all timers and also check
4334 * for shutdown_pending state. If so then go ahead and send off the
4335 * shutdown. If in shutdown recv, send off the shutdown-ack and
4336 * start that timer, Ret. 9) Strike any non-acked things and do FR
4337 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4338 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4339 * if in shutdown_recv state.
4341 SCTP_TCB_LOCK_ASSERT(stcb);
4344 this_sack_lowest_newack = 0;
4346 sack_length = (unsigned int)sack_len;
4348 SCTP_STAT_INCR(sctps_slowpath_sack);
4349 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4350 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4351 #ifdef SCTP_ASOCLOG_OF_TSNS
4352 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4353 stcb->asoc.cumack_log_at++;
4354 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4355 stcb->asoc.cumack_log_at = 0;
4358 num_seg = ntohs(sack->num_gap_ack_blks);
4361 if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4362 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4363 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4366 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4367 num_dup = ntohs(sack->num_dup_tsns);
4369 old_rwnd = stcb->asoc.peers_rwnd;
4370 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
4371 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4372 stcb->asoc.overall_error_count,
4374 SCTP_FROM_SCTP_INDATA,
4377 stcb->asoc.overall_error_count = 0;
4379 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
4380 sctp_log_sack(asoc->last_acked_seq,
4387 if ((num_dup) && (sctp_logging_level & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4388 int off_to_dup, iii;
4389 uint32_t *dupdata, dblock;
4391 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4392 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4393 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4394 sizeof(uint32_t), (uint8_t *) & dblock);
4395 off_to_dup += sizeof(uint32_t);
4397 for (iii = 0; iii < num_dup; iii++) {
4398 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4399 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4400 sizeof(uint32_t), (uint8_t *) & dblock);
4401 if (dupdata == NULL)
4403 off_to_dup += sizeof(uint32_t);
4407 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4408 off_to_dup, num_dup, sack_length, num_seg);
4411 if (sctp_strict_sacks) {
4413 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4414 tp1 = TAILQ_LAST(&asoc->sent_queue,
4415 sctpchunk_listhead);
4416 send_s = tp1->rec.data.TSN_seq + 1;
4418 send_s = asoc->sending_seq;
4420 if (cum_ack == send_s ||
4421 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4428 panic("Impossible sack 1");
4433 * no way, we have not even sent this TSN out yet.
4434 * Peer is hopelessly messed up with us.
4439 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4440 0, M_DONTWAIT, 1, MT_DATA);
4442 struct sctp_paramhdr *ph;
4445 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4447 ph = mtod(oper, struct sctp_paramhdr *);
4448 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4449 ph->param_length = htons(SCTP_BUF_LEN(oper));
4450 ippp = (uint32_t *) (ph + 1);
4451 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4453 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4454 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4459 /**********************/
4460 /* 1) check the range */
4461 /**********************/
4462 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4463 /* acking something behind */
4466 sav_cum_ack = asoc->last_acked_seq;
4468 /* update the Rwnd of the peer */
4469 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4470 TAILQ_EMPTY(&asoc->send_queue) &&
4471 (asoc->stream_queue_cnt == 0)
4473 /* nothing left on send/sent and strmq */
4474 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
4475 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4476 asoc->peers_rwnd, 0, 0, a_rwnd);
4478 asoc->peers_rwnd = a_rwnd;
4479 if (asoc->sent_queue_retran_cnt) {
4480 asoc->sent_queue_retran_cnt = 0;
4482 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4483 /* SWS sender side engages */
4484 asoc->peers_rwnd = 0;
4486 /* stop any timers */
4487 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4488 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4489 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4490 if (sctp_early_fr) {
4491 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4492 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4493 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4494 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4497 net->partial_bytes_acked = 0;
4498 net->flight_size = 0;
4500 asoc->total_flight = 0;
4501 asoc->total_flight_count = 0;
4505 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4506 * things. The total byte count acked is tracked in netAckSz AND
4507 * netAck2 is used to track the total bytes acked that are un-
4508 * amibguious and were never retransmitted. We track these on a per
4509 * destination address basis.
4511 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4512 net->prev_cwnd = net->cwnd;
4517 * CMT: Reset CUC and Fast recovery algo variables before
4520 net->new_pseudo_cumack = 0;
4521 net->will_exit_fast_recovery = 0;
4523 /* process the new consecutive TSN first */
4524 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4526 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4528 last_tsn == tp1->rec.data.TSN_seq) {
4529 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4531 * ECN Nonce: Add the nonce to the sender's
4534 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4536 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4538 * If it is less than ACKED, it is
4539 * now no-longer in flight. Higher
4540 * values may occur during marking
4542 if ((tp1->whoTo->dest_state &
4543 SCTP_ADDR_UNCONFIRMED) &&
4544 (tp1->snd_count < 2)) {
4546 * If there was no retran
4547 * and the address is
4548 * un-confirmed and we sent
4550 * sacked.. its confirmed,
4553 tp1->whoTo->dest_state &=
4554 ~SCTP_ADDR_UNCONFIRMED;
4556 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4557 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
4558 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4559 tp1->whoTo->flight_size,
4561 (uintptr_t) tp1->whoTo,
4562 tp1->rec.data.TSN_seq);
4564 sctp_flight_size_decrease(tp1);
4565 sctp_total_flight_decrease(stcb, tp1);
4567 tp1->whoTo->net_ack += tp1->send_size;
4569 /* CMT SFR and DAC algos */
4570 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4571 tp1->whoTo->saw_newack = 1;
4573 if (tp1->snd_count < 2) {
4575 * True non-retransmited
4578 tp1->whoTo->net_ack2 +=
4581 /* update RTO too? */
4584 sctp_calculate_rto(stcb,
4586 &tp1->sent_rcv_time,
4587 sctp_align_safe_nocopy);
4592 * CMT: CUCv2 algorithm. From the
4593 * cumack'd TSNs, for each TSN being
4594 * acked for the first time, set the
4595 * following variables for the
4596 * corresp destination.
4597 * new_pseudo_cumack will trigger a
4599 * find_(rtx_)pseudo_cumack will
4600 * trigger search for the next
4601 * expected (rtx-)pseudo-cumack.
4603 tp1->whoTo->new_pseudo_cumack = 1;
4604 tp1->whoTo->find_pseudo_cumack = 1;
4605 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4608 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
4609 sctp_log_sack(asoc->last_acked_seq,
4611 tp1->rec.data.TSN_seq,
4614 SCTP_LOG_TSN_ACKED);
4616 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
4617 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4620 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4621 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4622 #ifdef SCTP_AUDITING_ENABLED
4623 sctp_audit_log(0xB3,
4624 (asoc->sent_queue_retran_cnt & 0x000000ff));
4627 if (tp1->rec.data.chunk_was_revoked) {
4628 /* deflate the cwnd */
4629 tp1->whoTo->cwnd -= tp1->book_size;
4630 tp1->rec.data.chunk_was_revoked = 0;
4632 tp1->sent = SCTP_DATAGRAM_ACKED;
4637 tp1 = TAILQ_NEXT(tp1, sctp_next);
4639 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4640 /* always set this up to cum-ack */
4641 asoc->this_sack_highest_gap = last_tsn;
4643 /* Move offset up to point to gaps/dups */
4644 offset += sizeof(struct sctp_sack_chunk);
4645 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
4647 /* skip corrupt segments */
4653 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4654 * to be greater than the cumack. Also reset saw_newack to 0
4657 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4658 net->saw_newack = 0;
4659 net->this_sack_highest_newack = last_tsn;
4663 * thisSackHighestGap will increase while handling NEW
4664 * segments this_sack_highest_newack will increase while
4665 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4666 * used for CMT DAC algo. saw_newack will also change.
4668 sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
4669 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4670 num_seg, &ecn_seg_sums);
4672 if (sctp_strict_sacks) {
4674 * validate the biggest_tsn_acked in the gap acks if
4675 * strict adherence is wanted.
4677 if ((biggest_tsn_acked == send_s) ||
4678 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4680 * peer is either confused or we are under
4681 * attack. We must abort.
4688 /*******************************************/
4689 /* cancel ALL T3-send timer if accum moved */
4690 /*******************************************/
4691 if (sctp_cmt_on_off) {
4692 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4693 if (net->new_pseudo_cumack)
4694 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4696 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4701 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4702 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4703 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4707 /********************************************/
4708 /* drop the acked chunks from the sendqueue */
4709 /********************************************/
4710 asoc->last_acked_seq = cum_ack;
4712 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4716 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4720 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4721 /* no more sent on list */
4722 printf("Warning, tp1->sent == %d and its now acked?\n",
4725 tp2 = TAILQ_NEXT(tp1, sctp_next);
4726 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4727 if (tp1->pr_sctp_on) {
4728 if (asoc->pr_sctp_cnt != 0)
4729 asoc->pr_sctp_cnt--;
4731 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4732 (asoc->total_flight > 0)) {
4734 panic("Warning flight size is postive and should be 0");
4736 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4737 asoc->total_flight);
4739 asoc->total_flight = 0;
4742 sctp_free_bufspace(stcb, asoc, tp1, 1);
4743 sctp_m_freem(tp1->data);
4744 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4745 asoc->sent_queue_cnt_removeable--;
4748 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
4749 sctp_log_sack(asoc->last_acked_seq,
4751 tp1->rec.data.TSN_seq,
4754 SCTP_LOG_FREE_SENT);
4757 asoc->sent_queue_cnt--;
4758 sctp_free_a_chunk(stcb, tp1);
4761 } while (tp1 != NULL);
4764 if ((wake_him) && (stcb->sctp_socket)) {
4765 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4769 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4770 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
4771 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4773 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4774 so = SCTP_INP_SO(stcb->sctp_ep);
4775 atomic_add_int(&stcb->asoc.refcnt, 1);
4776 SCTP_TCB_UNLOCK(stcb);
4777 SCTP_SOCKET_LOCK(so, 1);
4778 SCTP_TCB_LOCK(stcb);
4779 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4780 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4781 /* assoc was freed while we were unlocked */
4782 SCTP_SOCKET_UNLOCK(so, 1);
4786 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4787 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4788 SCTP_SOCKET_UNLOCK(so, 1);
4791 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) {
4792 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4796 if (asoc->fast_retran_loss_recovery && accum_moved) {
4797 if (compare_with_wrap(asoc->last_acked_seq,
4798 asoc->fast_recovery_tsn, MAX_TSN) ||
4799 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4800 /* Setup so we will exit RFC2582 fast recovery */
4801 will_exit_fast_recovery = 1;
4805 * Check for revoked fragments:
4807 * if Previous sack - Had no frags then we can't have any revoked if
4808 * Previous sack - Had frag's then - If we now have frags aka
4809 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4810 * some of them. else - The peer revoked all ACKED fragments, since
4811 * we had some before and now we have NONE.
4815 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4816 else if (asoc->saw_sack_with_frags) {
4817 int cnt_revoked = 0;
4819 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4821 /* Peer revoked all dg's marked or acked */
4822 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4823 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
4824 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
4825 tp1->sent = SCTP_DATAGRAM_SENT;
4826 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
4827 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4828 tp1->whoTo->flight_size,
4830 (uintptr_t) tp1->whoTo,
4831 tp1->rec.data.TSN_seq);
4833 sctp_flight_size_increase(tp1);
4834 sctp_total_flight_increase(stcb, tp1);
4835 tp1->rec.data.chunk_was_revoked = 1;
4837 * To ensure that this increase in
4838 * flightsize, which is artificial,
4839 * does not throttle the sender, we
4840 * also increase the cwnd
4843 tp1->whoTo->cwnd += tp1->book_size;
4851 asoc->saw_sack_with_frags = 0;
4854 asoc->saw_sack_with_frags = 1;
4856 asoc->saw_sack_with_frags = 0;
4858 /* JRS - Use the congestion control given in the CC module */
4859 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4861 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4862 /* nothing left in-flight */
4863 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4864 /* stop all timers */
4865 if (sctp_early_fr) {
4866 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4867 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4868 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4869 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4872 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4873 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4874 net->flight_size = 0;
4875 net->partial_bytes_acked = 0;
4877 asoc->total_flight = 0;
4878 asoc->total_flight_count = 0;
4880 /**********************************/
4881 /* Now what about shutdown issues */
4882 /**********************************/
4883 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4884 /* nothing left on sendqueue.. consider done */
4885 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
4886 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4887 asoc->peers_rwnd, 0, 0, a_rwnd);
4889 asoc->peers_rwnd = a_rwnd;
4890 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4891 /* SWS sender side engages */
4892 asoc->peers_rwnd = 0;
4895 if ((asoc->stream_queue_cnt == 1) &&
4896 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4897 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4898 (asoc->locked_on_sending)
4900 struct sctp_stream_queue_pending *sp;
4903 * I may be in a state where we got all across.. but
4904 * cannot write more due to a shutdown... we abort
4905 * since the user did not indicate EOR in this case.
4907 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4909 if ((sp) && (sp->length == 0)) {
4910 asoc->locked_on_sending = NULL;
4911 if (sp->msg_is_complete) {
4912 asoc->stream_queue_cnt--;
4914 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4915 asoc->stream_queue_cnt--;
4919 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4920 (asoc->stream_queue_cnt == 0)) {
4921 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4922 /* Need to abort here */
4928 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4929 0, M_DONTWAIT, 1, MT_DATA);
4931 struct sctp_paramhdr *ph;
4934 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4936 ph = mtod(oper, struct sctp_paramhdr *);
4937 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4938 ph->param_length = htons(SCTP_BUF_LEN(oper));
4939 ippp = (uint32_t *) (ph + 1);
4940 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4942 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4943 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4946 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4947 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4948 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4950 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4951 sctp_stop_timers_for_shutdown(stcb);
4952 sctp_send_shutdown(stcb,
4953 stcb->asoc.primary_destination);
4954 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4955 stcb->sctp_ep, stcb, asoc->primary_destination);
4956 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4957 stcb->sctp_ep, stcb, asoc->primary_destination);
4960 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4961 (asoc->stream_queue_cnt == 0)) {
4962 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4965 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4966 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4967 sctp_send_shutdown_ack(stcb,
4968 stcb->asoc.primary_destination);
4970 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4971 stcb->sctp_ep, stcb, asoc->primary_destination);
4976 * Now here we are going to recycle net_ack for a different use...
4979 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4984 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4985 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4986 * automatically ensure that.
4988 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) {
4989 this_sack_lowest_newack = cum_ack;
4992 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4993 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4995 /*********************************************/
4996 /* Here we perform PR-SCTP procedures */
4998 /*********************************************/
4999 /* C1. update advancedPeerAckPoint */
5000 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5001 asoc->advanced_peer_ack_point = cum_ack;
5003 /* C2. try to further move advancedPeerAckPoint ahead */
5004 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5005 struct sctp_tmit_chunk *lchk;
5007 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5008 /* C3. See if we need to send a Fwd-TSN */
5009 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5012 * ISSUE with ECN, see FWD-TSN processing for notes
5013 * on issues that will occur when the ECN NONCE
5014 * stuff is put into SCTP for cross checking.
5016 send_forward_tsn(stcb, asoc);
5019 * ECN Nonce: Disable Nonce Sum check when FWD TSN
5020 * is sent and store resync tsn
5022 asoc->nonce_sum_check = 0;
5023 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5025 /* Assure a timer is up */
5026 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5027 stcb->sctp_ep, stcb, lchk->whoTo);
5031 /* JRS - Use the congestion control given in the CC module */
5032 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5034 /******************************************************************
5035 * Here we do the stuff with ECN Nonce checking.
5036 * We basically check to see if the nonce sum flag was incorrect
5037 * or if resynchronization needs to be done. Also if we catch a
5038 * misbehaving receiver we give him the kick.
5039 ******************************************************************/
5041 if (asoc->ecn_nonce_allowed) {
5042 if (asoc->nonce_sum_check) {
5043 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5044 if (asoc->nonce_wait_for_ecne == 0) {
5045 struct sctp_tmit_chunk *lchk;
5047 lchk = TAILQ_FIRST(&asoc->send_queue);
5048 asoc->nonce_wait_for_ecne = 1;
5050 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5052 asoc->nonce_wait_tsn = asoc->sending_seq;
5055 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5056 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5058 * Misbehaving peer. We need
5059 * to react to this guy
5061 asoc->ecn_allowed = 0;
5062 asoc->ecn_nonce_allowed = 0;
5067 /* See if Resynchronization Possible */
5068 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5069 asoc->nonce_sum_check = 1;
5071 * now we must calculate what the base is.
5072 * We do this based on two things, we know
5073 * the total's for all the segments
5074 * gap-acked in the SACK, its stored in
5075 * ecn_seg_sums. We also know the SACK's
5076 * nonce sum, its in nonce_sum_flag. So we
5077 * can build a truth table to back-calculate
5079 * asoc->nonce_sum_expect_base:
5081 * SACK-flag-Value Seg-Sums Base 0 0 0
5085 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5089 /* Now are we exiting loss recovery ? */
5090 if (will_exit_fast_recovery) {
5091 /* Ok, we must exit fast recovery */
5092 asoc->fast_retran_loss_recovery = 0;
5094 if ((asoc->sat_t3_loss_recovery) &&
5095 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5097 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5098 /* end satellite t3 loss recovery */
5099 asoc->sat_t3_loss_recovery = 0;
5104 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5105 if (net->will_exit_fast_recovery) {
5106 /* Ok, we must exit fast recovery */
5107 net->fast_retran_loss_recovery = 0;
5111 /* Adjust and set the new rwnd value */
5112 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) {
5113 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5114 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
5116 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5117 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
5118 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5119 /* SWS sender side engages */
5120 asoc->peers_rwnd = 0;
5122 if (asoc->peers_rwnd > old_rwnd) {
5123 win_probe_recovery = 1;
5126 * Now we must setup so we have a timer up for anyone with
5132 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5133 if (win_probe_recovery && (net->window_probe)) {
5134 net->window_probe = 0;
5135 win_probe_recovered = 1;
5137 * Find first chunk that was used with
5138 * window probe and clear the event. Put
5139 * it back into the send queue as if has
5142 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5143 if (tp1->window_probe) {
5144 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5149 if (net->flight_size) {
5151 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5152 stcb->sctp_ep, stcb, net);
5154 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5155 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5157 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5159 if (sctp_early_fr) {
5160 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5161 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5162 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5163 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5169 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5170 (asoc->sent_queue_retran_cnt == 0) &&
5171 (win_probe_recovered == 0) &&
5173 /* huh, this should not happen */
5174 sctp_fs_audit(asoc);
5175 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5176 net->flight_size = 0;
5178 asoc->total_flight = 0;
5179 asoc->total_flight_count = 0;
5180 asoc->sent_queue_retran_cnt = 0;
5181 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5182 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5183 sctp_flight_size_increase(tp1);
5184 sctp_total_flight_increase(stcb, tp1);
5185 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5186 asoc->sent_queue_retran_cnt++;
5192 if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) {
5193 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5195 stcb->asoc.peers_rwnd,
5196 stcb->asoc.total_flight,
5197 stcb->asoc.total_output_queue_size);
5202 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5203 struct sctp_nets *netp, int *abort_flag)
5206 uint32_t cum_ack, a_rwnd;
5208 cum_ack = ntohl(cp->cumulative_tsn_ack);
5209 /* Arrange so a_rwnd does NOT change */
5210 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5212 /* Now call the express sack handling */
5213 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5217 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5218 struct sctp_stream_in *strmin)
5220 struct sctp_queued_to_read *ctl, *nctl;
5221 struct sctp_association *asoc;
5225 tt = strmin->last_sequence_delivered;
5227 * First deliver anything prior to and including the stream no that
5230 ctl = TAILQ_FIRST(&strmin->inqueue);
5232 nctl = TAILQ_NEXT(ctl, next);
5233 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5234 (tt == ctl->sinfo_ssn)) {
5235 /* this is deliverable now */
5236 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5237 /* subtract pending on streams */
5238 asoc->size_on_all_streams -= ctl->length;
5239 sctp_ucount_decr(asoc->cnt_on_all_streams);
5240 /* deliver it to at least the delivery-q */
5241 if (stcb->sctp_socket) {
5242 sctp_add_to_readq(stcb->sctp_ep, stcb,
5244 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5247 /* no more delivery now. */
5253 * now we must deliver things in queue the normal way if any are
5256 tt = strmin->last_sequence_delivered + 1;
5257 ctl = TAILQ_FIRST(&strmin->inqueue);
5259 nctl = TAILQ_NEXT(ctl, next);
5260 if (tt == ctl->sinfo_ssn) {
5261 /* this is deliverable now */
5262 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5263 /* subtract pending on streams */
5264 asoc->size_on_all_streams -= ctl->length;
5265 sctp_ucount_decr(asoc->cnt_on_all_streams);
5266 /* deliver it to at least the delivery-q */
5267 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5268 if (stcb->sctp_socket) {
5269 sctp_add_to_readq(stcb->sctp_ep, stcb,
5271 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5273 tt = strmin->last_sequence_delivered + 1;
5282 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5283 struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
5286 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5287 * forward TSN, when the SACK comes back that acknowledges the
5288 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5289 * get quite tricky since we may have sent more data interveneing
5290 * and must carefully account for what the SACK says on the nonce
5291 * and any gaps that are reported. This work will NOT be done here,
5292 * but I note it here since it is really related to PR-SCTP and
5296 /* The pr-sctp fwd tsn */
5298 * here we will perform all the data receiver side steps for
5299 * processing FwdTSN, as required in by pr-sctp draft:
5301 * Assume we get FwdTSN(x):
5303 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5304 * others we have 3) examine and update re-ordering queue on
5305 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5306 * report where we are.
5308 struct sctp_association *asoc;
5309 uint32_t new_cum_tsn, gap;
5310 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5311 struct sctp_stream_in *strm;
5312 struct sctp_tmit_chunk *chk, *at;
5314 cumack_set_flag = 0;
5317 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5318 SCTPDBG(SCTP_DEBUG_INDATA1,
5319 "Bad size too small/big fwd-tsn\n");
5322 m_size = (stcb->asoc.mapping_array_size << 3);
5323 /*************************************************************/
5324 /* 1. Here we update local cumTSN and shift the bitmap array */
5325 /*************************************************************/
5326 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5328 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5329 asoc->cumulative_tsn == new_cum_tsn) {
5330 /* Already got there ... */
5333 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5335 asoc->highest_tsn_inside_map = new_cum_tsn;
5336 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
5337 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5341 * now we know the new TSN is more advanced, let's find the actual
5344 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
5346 (new_cum_tsn == asoc->mapping_array_base_tsn)) {
5347 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
5349 /* try to prevent underflow here */
5350 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5353 if (gap >= m_size) {
5354 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
5355 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5357 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5361 * out of range (of single byte chunks in the rwnd I
5362 * give out). This must be an attacker.
5365 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5366 0, M_DONTWAIT, 1, MT_DATA);
5368 struct sctp_paramhdr *ph;
5371 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5372 (sizeof(uint32_t) * 3);
5373 ph = mtod(oper, struct sctp_paramhdr *);
5374 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5375 ph->param_length = htons(SCTP_BUF_LEN(oper));
5376 ippp = (uint32_t *) (ph + 1);
5377 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5379 *ippp = asoc->highest_tsn_inside_map;
5381 *ippp = new_cum_tsn;
5383 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5384 sctp_abort_an_association(stcb->sctp_ep, stcb,
5385 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5388 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5390 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5391 cumack_set_flag = 1;
5392 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5393 asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
5395 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
5396 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5398 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5400 SCTP_TCB_LOCK_ASSERT(stcb);
5401 if ((compare_with_wrap(((uint32_t) asoc->cumulative_tsn + gap), asoc->highest_tsn_inside_map, MAX_TSN)) ||
5402 (((uint32_t) asoc->cumulative_tsn + gap) == asoc->highest_tsn_inside_map)) {
5405 for (i = 0; i <= gap; i++) {
5406 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
5410 * Now after marking all, slide thing forward but no sack
5413 sctp_sack_check(stcb, 0, 0, abort_flag);
5418 /*************************************************************/
5419 /* 2. Clear up re-assembly queue */
5420 /*************************************************************/
5422 * First service it if pd-api is up, just in case we can progress it
5425 if (asoc->fragmented_delivery_inprogress) {
5426 sctp_service_reassembly(stcb, asoc);
5428 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5429 /* For each one on here see if we need to toss it */
5431 * For now large messages held on the reasmqueue that are
5432 * complete will be tossed too. We could in theory do more
5433 * work to spin through and stop after dumping one msg aka
5434 * seeing the start of a new msg at the head, and call the
5435 * delivery function... to see if it can be delivered... But
5436 * for now we just dump everything on the queue.
5438 chk = TAILQ_FIRST(&asoc->reasmqueue);
5440 at = TAILQ_NEXT(chk, sctp_next);
5441 if (compare_with_wrap(asoc->cumulative_tsn,
5442 chk->rec.data.TSN_seq, MAX_TSN) ||
5443 asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
5444 /* It needs to be tossed */
5445 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5446 if (compare_with_wrap(chk->rec.data.TSN_seq,
5447 asoc->tsn_last_delivered, MAX_TSN)) {
5448 asoc->tsn_last_delivered =
5449 chk->rec.data.TSN_seq;
5450 asoc->str_of_pdapi =
5451 chk->rec.data.stream_number;
5452 asoc->ssn_of_pdapi =
5453 chk->rec.data.stream_seq;
5454 asoc->fragment_flags =
5455 chk->rec.data.rcv_flags;
5457 asoc->size_on_reasm_queue -= chk->send_size;
5458 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5461 /* Clear up any stream problem */
5462 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5463 SCTP_DATA_UNORDERED &&
5464 (compare_with_wrap(chk->rec.data.stream_seq,
5465 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5468 * We must dump forward this streams
5469 * sequence number if the chunk is
5470 * not unordered that is being
5471 * skipped. There is a chance that
5472 * if the peer does not include the
5473 * last fragment in its FWD-TSN we
5474 * WILL have a problem here since
5475 * you would have a partial chunk in
5476 * queue that may not be
5477 * deliverable. Also if a Partial
5478 * delivery API as started the user
5479 * may get a partial chunk. The next
5480 * read returning a new chunk...
5481 * really ugly but I see no way
5482 * around it! Maybe a notify??
5484 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5485 chk->rec.data.stream_seq;
5488 sctp_m_freem(chk->data);
5491 sctp_free_a_chunk(stcb, chk);
5494 * Ok we have gone beyond the end of the
5495 * fwd-tsn's mark. Some checks...
5497 if ((asoc->fragmented_delivery_inprogress) &&
5498 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5502 * Special case PD-API is up and
5503 * what we fwd-tsn' over includes
5504 * one that had the LAST_FRAG. We no
5505 * longer need to do the PD-API.
5507 asoc->fragmented_delivery_inprogress = 0;
5509 str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
5510 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5511 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
5519 if (asoc->fragmented_delivery_inprogress) {
5521 * Ok we removed cnt_gone chunks in the PD-API queue that
5522 * were being delivered. So now we must turn off the flag.
5526 str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
5527 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5528 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
5529 asoc->fragmented_delivery_inprogress = 0;
5531 /*************************************************************/
5532 /* 3. Update the PR-stream re-ordering queues */
5533 /*************************************************************/
5534 fwd_sz -= sizeof(*fwd);
5537 unsigned int num_str;
5538 struct sctp_strseq *stseq, strseqbuf;
5540 offset += sizeof(*fwd);
5542 num_str = fwd_sz / sizeof(struct sctp_strseq);
5543 for (i = 0; i < num_str; i++) {
5547 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5548 sizeof(struct sctp_strseq),
5549 (uint8_t *) & strseqbuf);
5550 offset += sizeof(struct sctp_strseq);
5551 if (stseq == NULL) {
5555 xx = (unsigned char *)&stseq[i];
5556 st = ntohs(stseq[i].stream);
5557 stseq[i].stream = st;
5558 st = ntohs(stseq[i].sequence);
5559 stseq[i].sequence = st;
5561 if (stseq[i].stream >= asoc->streamincnt) {
5562 /* screwed up streams, stop! */
5565 strm = &asoc->strmin[stseq[i].stream];
5566 if (compare_with_wrap(stseq[i].sequence,
5567 strm->last_sequence_delivered, MAX_SEQ)) {
5568 /* Update the sequence number */
5569 strm->last_sequence_delivered =
5572 /* now kick the stream the new way */
5573 sctp_kick_prsctp_reorder_queue(stcb, strm);
5576 if (TAILQ_FIRST(&asoc->reasmqueue)) {
5577 /* now lets kick out and check for more fragmented delivery */
5578 sctp_deliver_reasm_check(stcb, &stcb->asoc);