2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send, that is) and will be sending it ... for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = stcb->asoc.context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
203 struct sctp_sndrcvinfo *sinfo)
205 struct sctp_sndrcvinfo *outinfo;
209 int use_extended = 0;
211 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
212 /* user does not want the sndrcv ctl */
215 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
217 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
219 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
223 ret = sctp_get_mbuf_for_msg(len,
224 0, M_DONTWAIT, 1, MT_DATA);
230 /* We need a CMSG header followed by the struct */
231 cmh = mtod(ret, struct cmsghdr *);
232 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
233 cmh->cmsg_level = IPPROTO_SCTP;
235 cmh->cmsg_type = SCTP_EXTRCV;
237 memcpy(outinfo, sinfo, len);
239 cmh->cmsg_type = SCTP_SNDRCV;
243 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
249 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
251 struct sctp_sndrcvinfo *sinfo)
253 struct sctp_sndrcvinfo *outinfo;
257 int use_extended = 0;
259 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
260 /* user does not want the sndrcv ctl */
263 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
265 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
267 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
269 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
274 /* We need a CMSG header followed by the struct */
275 cmh = (struct cmsghdr *)buf;
276 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
277 cmh->cmsg_level = IPPROTO_SCTP;
279 cmh->cmsg_type = SCTP_EXTRCV;
281 memcpy(outinfo, sinfo, len);
283 cmh->cmsg_type = SCTP_SNDRCV;
292 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
294 uint32_t gap, i, cumackp1;
297 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
300 cumackp1 = asoc->cumulative_tsn + 1;
301 if (compare_with_wrap(cumackp1, tsn, MAX_TSN)) {
303 * this tsn is behind the cum ack and thus we don't need to
304 * worry about it being moved from one to the other.
308 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
309 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
310 printf("gap:%x tsn:%x\n", gap, tsn);
311 sctp_print_mapping_array(asoc);
313 panic("Things are really messed up now!!");
316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
318 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
319 asoc->highest_tsn_inside_nr_map = tsn;
321 if (tsn == asoc->highest_tsn_inside_map) {
322 /* We must back down to see what the new highest is */
323 for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
324 (i == asoc->mapping_array_base_tsn)); i--) {
325 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 asoc->highest_tsn_inside_map = i;
333 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
340 * We are delivering currently from the reassembly queue. We must continue to
341 * deliver until we either: 1) run out of space. 2) run out of sequential
342 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
345 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
347 struct sctp_tmit_chunk *chk;
353 struct sctp_queued_to_read *control, *ctl, *ctlat;
358 cntDel = stream_no = 0;
359 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
360 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
361 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
362 /* socket above is long gone or going.. */
364 asoc->fragmented_delivery_inprogress = 0;
365 chk = TAILQ_FIRST(&asoc->reasmqueue);
367 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
368 asoc->size_on_reasm_queue -= chk->send_size;
369 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
371 * Lose the data pointer, since its in the socket
375 sctp_m_freem(chk->data);
378 /* Now free the address and data */
379 sctp_free_a_chunk(stcb, chk);
380 /* sa_ignore FREED_MEMORY */
381 chk = TAILQ_FIRST(&asoc->reasmqueue);
385 SCTP_TCB_LOCK_ASSERT(stcb);
387 chk = TAILQ_FIRST(&asoc->reasmqueue);
391 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
392 /* Can't deliver more :< */
395 stream_no = chk->rec.data.stream_number;
396 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
397 if (nxt_todel != chk->rec.data.stream_seq &&
398 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
400 * Not the next sequence to deliver in its stream OR
405 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
407 control = sctp_build_readq_entry_chk(stcb, chk);
408 if (control == NULL) {
412 /* save it off for our future deliveries */
413 stcb->asoc.control_pdapi = control;
414 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
418 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
419 sctp_add_to_readq(stcb->sctp_ep,
420 stcb, control, &stcb->sctp_socket->so_rcv, end,
421 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
424 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
428 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
429 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
430 stcb->asoc.control_pdapi,
431 chk->data, end, chk->rec.data.TSN_seq,
432 &stcb->sctp_socket->so_rcv)) {
434 * something is very wrong, either
435 * control_pdapi is NULL, or the tail_mbuf
436 * is corrupt, or there is a EOM already on
439 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
443 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
444 panic("This should not happen control_pdapi NULL?");
446 /* if we did not panic, it was a EOM */
447 panic("Bad chunking ??");
449 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
450 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
452 SCTP_PRINTF("Bad chunking ??\n");
453 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
461 /* pull it we did it */
462 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
463 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
464 asoc->fragmented_delivery_inprogress = 0;
465 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
466 asoc->strmin[stream_no].last_sequence_delivered++;
468 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
469 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
471 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
473 * turn the flag back on since we just delivered
476 asoc->fragmented_delivery_inprogress = 1;
478 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
479 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
480 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
481 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
483 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
484 asoc->size_on_reasm_queue -= chk->send_size;
485 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
486 /* free up the chk */
488 sctp_free_a_chunk(stcb, chk);
490 if (asoc->fragmented_delivery_inprogress == 0) {
492 * Now lets see if we can deliver the next one on
495 struct sctp_stream_in *strm;
497 strm = &asoc->strmin[stream_no];
498 nxt_todel = strm->last_sequence_delivered + 1;
499 ctl = TAILQ_FIRST(&strm->inqueue);
500 if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
501 while (ctl != NULL) {
502 /* Deliver more if we can. */
503 if (nxt_todel == ctl->sinfo_ssn) {
504 ctlat = TAILQ_NEXT(ctl, next);
505 TAILQ_REMOVE(&strm->inqueue, ctl, next);
506 asoc->size_on_all_streams -= ctl->length;
507 sctp_ucount_decr(asoc->cnt_on_all_streams);
508 strm->last_sequence_delivered++;
509 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
510 sctp_add_to_readq(stcb->sctp_ep, stcb,
512 &stcb->sctp_socket->so_rcv, 1,
513 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
518 nxt_todel = strm->last_sequence_delivered + 1;
523 /* sa_ignore FREED_MEMORY */
524 chk = TAILQ_FIRST(&asoc->reasmqueue);
529 * Queue the chunk either right into the socket buffer if it is the next one
530 * to go OR put it in the correct place in the delivery queue. If we do
531 * append to the so_buf, keep doing so until we are out of order. One big
532 * question still remains, what to do when the socket buffer is FULL??
535 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
536 struct sctp_queued_to_read *control, int *abort_flag)
539 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
540 * all the data in one stream this could happen quite rapidly. One
541 * could use the TSN to keep track of things, but this scheme breaks
542 * down in the other type of stream useage that could occur. Send a
543 * single msg to stream 0, send 4Billion messages to stream 1, now
544 * send a message to stream 0. You have a situation where the TSN
545 * has wrapped but not in the stream. Is this worth worrying about
546 * or should we just change our queue sort at the bottom to be by
549 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
550 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
551 * assignment this could happen... and I don't see how this would be
552 * a violation. So for now I am undecided an will leave the sort by
553 * SSN alone. Maybe a hybred approach is the answer
556 struct sctp_stream_in *strm;
557 struct sctp_queued_to_read *at;
563 asoc->size_on_all_streams += control->length;
564 sctp_ucount_incr(asoc->cnt_on_all_streams);
565 strm = &asoc->strmin[control->sinfo_stream];
566 nxt_todel = strm->last_sequence_delivered + 1;
567 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
568 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
570 SCTPDBG(SCTP_DEBUG_INDATA1,
571 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
572 (uint32_t) control->sinfo_stream,
573 (uint32_t) strm->last_sequence_delivered,
574 (uint32_t) nxt_todel);
575 if (compare_with_wrap(strm->last_sequence_delivered,
576 control->sinfo_ssn, MAX_SEQ) ||
577 (strm->last_sequence_delivered == control->sinfo_ssn)) {
578 /* The incoming sseq is behind where we last delivered? */
579 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
580 control->sinfo_ssn, strm->last_sequence_delivered);
583 * throw it in the stream so it gets cleaned up in
584 * association destruction
586 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
587 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
588 0, M_DONTWAIT, 1, MT_DATA);
590 struct sctp_paramhdr *ph;
593 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
594 (sizeof(uint32_t) * 3);
595 ph = mtod(oper, struct sctp_paramhdr *);
596 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
597 ph->param_length = htons(SCTP_BUF_LEN(oper));
598 ippp = (uint32_t *) (ph + 1);
599 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
601 *ippp = control->sinfo_tsn;
603 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
605 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
606 sctp_abort_an_association(stcb->sctp_ep, stcb,
607 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
613 if (nxt_todel == control->sinfo_ssn) {
614 /* can be delivered right away? */
615 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
616 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
618 /* EY it wont be queued if it could be delivered directly */
620 asoc->size_on_all_streams -= control->length;
621 sctp_ucount_decr(asoc->cnt_on_all_streams);
622 strm->last_sequence_delivered++;
624 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
625 sctp_add_to_readq(stcb->sctp_ep, stcb,
627 &stcb->sctp_socket->so_rcv, 1,
628 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
629 control = TAILQ_FIRST(&strm->inqueue);
630 while (control != NULL) {
632 nxt_todel = strm->last_sequence_delivered + 1;
633 if (nxt_todel == control->sinfo_ssn) {
634 at = TAILQ_NEXT(control, next);
635 TAILQ_REMOVE(&strm->inqueue, control, next);
636 asoc->size_on_all_streams -= control->length;
637 sctp_ucount_decr(asoc->cnt_on_all_streams);
638 strm->last_sequence_delivered++;
640 * We ignore the return of deliver_data here
641 * since we always can hold the chunk on the
642 * d-queue. And we have a finite number that
643 * can be delivered from the strq.
645 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
646 sctp_log_strm_del(control, NULL,
647 SCTP_STR_LOG_FROM_IMMED_DEL);
649 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
650 sctp_add_to_readq(stcb->sctp_ep, stcb,
652 &stcb->sctp_socket->so_rcv, 1,
653 SCTP_READ_LOCK_NOT_HELD,
663 * Ok, we did not deliver this guy, find the correct place
664 * to put it on the queue.
666 if ((compare_with_wrap(asoc->cumulative_tsn,
667 control->sinfo_tsn, MAX_TSN)) ||
668 (control->sinfo_tsn == asoc->cumulative_tsn)) {
671 if (TAILQ_EMPTY(&strm->inqueue)) {
673 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
674 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
676 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
678 TAILQ_FOREACH(at, &strm->inqueue, next) {
679 if (compare_with_wrap(at->sinfo_ssn,
680 control->sinfo_ssn, MAX_SEQ)) {
682 * one in queue is bigger than the
683 * new one, insert before this one
685 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
686 sctp_log_strm_del(control, at,
687 SCTP_STR_LOG_FROM_INSERT_MD);
689 TAILQ_INSERT_BEFORE(at, control, next);
691 } else if (at->sinfo_ssn == control->sinfo_ssn) {
693 * Gak, He sent me a duplicate str
697 * foo bar, I guess I will just free
698 * this new guy, should we abort
699 * too? FIX ME MAYBE? Or it COULD be
700 * that the SSN's have wrapped.
701 * Maybe I should compare to TSN
702 * somehow... sigh for now just blow
707 sctp_m_freem(control->data);
708 control->data = NULL;
709 asoc->size_on_all_streams -= control->length;
710 sctp_ucount_decr(asoc->cnt_on_all_streams);
711 if (control->whoFrom) {
712 sctp_free_remote_addr(control->whoFrom);
713 control->whoFrom = NULL;
715 sctp_free_a_readq(stcb, control);
718 if (TAILQ_NEXT(at, next) == NULL) {
720 * We are at the end, insert
723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
724 sctp_log_strm_del(control, at,
725 SCTP_STR_LOG_FROM_INSERT_TL);
727 TAILQ_INSERT_AFTER(&strm->inqueue,
738 * Returns two things: You get the total size of the deliverable parts of the
739 * first fragmented message on the reassembly queue. And you get a 1 back if
740 * all of the message is ready or a 0 back if the message is still incomplete
743 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
745 struct sctp_tmit_chunk *chk;
749 chk = TAILQ_FIRST(&asoc->reasmqueue);
751 /* nothing on the queue */
754 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
755 /* Not a first on the queue */
758 tsn = chk->rec.data.TSN_seq;
760 if (tsn != chk->rec.data.TSN_seq) {
763 *t_size += chk->send_size;
764 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
768 chk = TAILQ_NEXT(chk, sctp_next);
774 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
776 struct sctp_tmit_chunk *chk;
778 uint32_t tsize, pd_point;
781 chk = TAILQ_FIRST(&asoc->reasmqueue);
784 asoc->size_on_reasm_queue = 0;
785 asoc->cnt_on_reasm_queue = 0;
788 if (asoc->fragmented_delivery_inprogress == 0) {
790 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
791 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
792 (nxt_todel == chk->rec.data.stream_seq ||
793 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
795 * Yep the first one is here and its ok to deliver
798 if (stcb->sctp_socket) {
799 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
800 stcb->sctp_ep->partial_delivery_point);
802 pd_point = stcb->sctp_ep->partial_delivery_point;
804 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
807 * Yes, we setup to start reception, by
808 * backing down the TSN just in case we
809 * can't deliver. If we
811 asoc->fragmented_delivery_inprogress = 1;
812 asoc->tsn_last_delivered =
813 chk->rec.data.TSN_seq - 1;
815 chk->rec.data.stream_number;
816 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
817 asoc->pdapi_ppid = chk->rec.data.payloadtype;
818 asoc->fragment_flags = chk->rec.data.rcv_flags;
819 sctp_service_reassembly(stcb, asoc);
824 * Service re-assembly will deliver stream data queued at
825 * the end of fragmented delivery.. but it wont know to go
826 * back and call itself again... we do that here with the
829 sctp_service_reassembly(stcb, asoc);
830 if (asoc->fragmented_delivery_inprogress == 0) {
832 * finished our Fragmented delivery, could be more
841 * Dump onto the re-assembly queue, in its proper place. After dumping on the
842 * queue, see if anthing can be delivered. If so pull it off (or as much as
843 * we can. If we run out of space then we must dump what we can and set the
844 * appropriate flag to say we queued what we could.
847 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
848 struct sctp_tmit_chunk *chk, int *abort_flag)
851 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
853 struct sctp_tmit_chunk *at, *prev, *next;
856 cum_ackp1 = asoc->tsn_last_delivered + 1;
857 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
858 /* This is the first one on the queue */
859 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
861 * we do not check for delivery of anything when only one
864 asoc->size_on_reasm_queue = chk->send_size;
865 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
866 if (chk->rec.data.TSN_seq == cum_ackp1) {
867 if (asoc->fragmented_delivery_inprogress == 0 &&
868 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
869 SCTP_DATA_FIRST_FRAG) {
871 * An empty queue, no delivery inprogress,
872 * we hit the next one and it does NOT have
873 * a FIRST fragment mark.
875 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
876 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
877 0, M_DONTWAIT, 1, MT_DATA);
880 struct sctp_paramhdr *ph;
884 sizeof(struct sctp_paramhdr) +
885 (sizeof(uint32_t) * 3);
886 ph = mtod(oper, struct sctp_paramhdr *);
888 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
889 ph->param_length = htons(SCTP_BUF_LEN(oper));
890 ippp = (uint32_t *) (ph + 1);
891 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
893 *ippp = chk->rec.data.TSN_seq;
895 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
898 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
899 sctp_abort_an_association(stcb->sctp_ep, stcb,
900 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
902 } else if (asoc->fragmented_delivery_inprogress &&
903 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
905 * We are doing a partial delivery and the
906 * NEXT chunk MUST be either the LAST or
907 * MIDDLE fragment NOT a FIRST
909 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
910 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
911 0, M_DONTWAIT, 1, MT_DATA);
913 struct sctp_paramhdr *ph;
917 sizeof(struct sctp_paramhdr) +
918 (3 * sizeof(uint32_t));
919 ph = mtod(oper, struct sctp_paramhdr *);
921 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
922 ph->param_length = htons(SCTP_BUF_LEN(oper));
923 ippp = (uint32_t *) (ph + 1);
924 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
926 *ippp = chk->rec.data.TSN_seq;
928 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
930 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
931 sctp_abort_an_association(stcb->sctp_ep, stcb,
932 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
934 } else if (asoc->fragmented_delivery_inprogress) {
936 * Here we are ok with a MIDDLE or LAST
939 if (chk->rec.data.stream_number !=
940 asoc->str_of_pdapi) {
941 /* Got to be the right STR No */
942 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
943 chk->rec.data.stream_number,
945 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
946 0, M_DONTWAIT, 1, MT_DATA);
948 struct sctp_paramhdr *ph;
952 sizeof(struct sctp_paramhdr) +
953 (sizeof(uint32_t) * 3);
955 struct sctp_paramhdr *);
957 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
959 htons(SCTP_BUF_LEN(oper));
960 ippp = (uint32_t *) (ph + 1);
961 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
963 *ippp = chk->rec.data.TSN_seq;
965 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
967 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
968 sctp_abort_an_association(stcb->sctp_ep,
969 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
971 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
972 SCTP_DATA_UNORDERED &&
973 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
974 /* Got to be the right STR Seq */
975 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
976 chk->rec.data.stream_seq,
978 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
979 0, M_DONTWAIT, 1, MT_DATA);
981 struct sctp_paramhdr *ph;
985 sizeof(struct sctp_paramhdr) +
986 (3 * sizeof(uint32_t));
988 struct sctp_paramhdr *);
990 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
992 htons(SCTP_BUF_LEN(oper));
993 ippp = (uint32_t *) (ph + 1);
994 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
996 *ippp = chk->rec.data.TSN_seq;
998 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1001 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1002 sctp_abort_an_association(stcb->sctp_ep,
1003 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1010 /* Find its place */
1011 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1012 if (compare_with_wrap(at->rec.data.TSN_seq,
1013 chk->rec.data.TSN_seq, MAX_TSN)) {
1015 * one in queue is bigger than the new one, insert
1019 asoc->size_on_reasm_queue += chk->send_size;
1020 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1022 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1024 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1025 /* Gak, He sent me a duplicate str seq number */
1027 * foo bar, I guess I will just free this new guy,
1028 * should we abort too? FIX ME MAYBE? Or it COULD be
1029 * that the SSN's have wrapped. Maybe I should
1030 * compare to TSN somehow... sigh for now just blow
1034 sctp_m_freem(chk->data);
1037 sctp_free_a_chunk(stcb, chk);
1040 last_flags = at->rec.data.rcv_flags;
1041 last_tsn = at->rec.data.TSN_seq;
1043 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1045 * We are at the end, insert it after this
1048 /* check it first */
1049 asoc->size_on_reasm_queue += chk->send_size;
1050 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1051 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1056 /* Now the audits */
1058 prev_tsn = chk->rec.data.TSN_seq - 1;
1059 if (prev_tsn == prev->rec.data.TSN_seq) {
1061 * Ok the one I am dropping onto the end is the
1062 * NEXT. A bit of valdiation here.
1064 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1065 SCTP_DATA_FIRST_FRAG ||
1066 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1067 SCTP_DATA_MIDDLE_FRAG) {
1069 * Insert chk MUST be a MIDDLE or LAST
1072 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1073 SCTP_DATA_FIRST_FRAG) {
1074 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1075 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1076 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1077 0, M_DONTWAIT, 1, MT_DATA);
1079 struct sctp_paramhdr *ph;
1082 SCTP_BUF_LEN(oper) =
1083 sizeof(struct sctp_paramhdr) +
1084 (3 * sizeof(uint32_t));
1086 struct sctp_paramhdr *);
1088 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1090 htons(SCTP_BUF_LEN(oper));
1091 ippp = (uint32_t *) (ph + 1);
1092 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1094 *ippp = chk->rec.data.TSN_seq;
1096 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1099 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1100 sctp_abort_an_association(stcb->sctp_ep,
1101 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1105 if (chk->rec.data.stream_number !=
1106 prev->rec.data.stream_number) {
1108 * Huh, need the correct STR here,
1109 * they must be the same.
1111 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1112 chk->rec.data.stream_number,
1113 prev->rec.data.stream_number);
1114 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1115 0, M_DONTWAIT, 1, MT_DATA);
1117 struct sctp_paramhdr *ph;
1120 SCTP_BUF_LEN(oper) =
1121 sizeof(struct sctp_paramhdr) +
1122 (3 * sizeof(uint32_t));
1124 struct sctp_paramhdr *);
1126 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1128 htons(SCTP_BUF_LEN(oper));
1129 ippp = (uint32_t *) (ph + 1);
1130 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1132 *ippp = chk->rec.data.TSN_seq;
1134 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1136 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1137 sctp_abort_an_association(stcb->sctp_ep,
1138 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1143 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1144 chk->rec.data.stream_seq !=
1145 prev->rec.data.stream_seq) {
1147 * Huh, need the correct STR here,
1148 * they must be the same.
1150 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1151 chk->rec.data.stream_seq,
1152 prev->rec.data.stream_seq);
1153 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1154 0, M_DONTWAIT, 1, MT_DATA);
1156 struct sctp_paramhdr *ph;
1159 SCTP_BUF_LEN(oper) =
1160 sizeof(struct sctp_paramhdr) +
1161 (3 * sizeof(uint32_t));
1163 struct sctp_paramhdr *);
1165 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1167 htons(SCTP_BUF_LEN(oper));
1168 ippp = (uint32_t *) (ph + 1);
1169 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1171 *ippp = chk->rec.data.TSN_seq;
1173 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1175 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1176 sctp_abort_an_association(stcb->sctp_ep,
1177 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1182 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1183 SCTP_DATA_LAST_FRAG) {
1184 /* Insert chk MUST be a FIRST */
1185 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1186 SCTP_DATA_FIRST_FRAG) {
1187 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1188 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1189 0, M_DONTWAIT, 1, MT_DATA);
1191 struct sctp_paramhdr *ph;
1194 SCTP_BUF_LEN(oper) =
1195 sizeof(struct sctp_paramhdr) +
1196 (3 * sizeof(uint32_t));
1198 struct sctp_paramhdr *);
1200 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1202 htons(SCTP_BUF_LEN(oper));
1203 ippp = (uint32_t *) (ph + 1);
1204 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1206 *ippp = chk->rec.data.TSN_seq;
1208 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1211 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1212 sctp_abort_an_association(stcb->sctp_ep,
1213 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1222 post_tsn = chk->rec.data.TSN_seq + 1;
1223 if (post_tsn == next->rec.data.TSN_seq) {
1225 * Ok the one I am inserting ahead of is my NEXT
1226 * one. A bit of valdiation here.
1228 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1229 /* Insert chk MUST be a last fragment */
1230 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1231 != SCTP_DATA_LAST_FRAG) {
1232 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1233 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1234 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1235 0, M_DONTWAIT, 1, MT_DATA);
1237 struct sctp_paramhdr *ph;
1240 SCTP_BUF_LEN(oper) =
1241 sizeof(struct sctp_paramhdr) +
1242 (3 * sizeof(uint32_t));
1244 struct sctp_paramhdr *);
1246 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1248 htons(SCTP_BUF_LEN(oper));
1249 ippp = (uint32_t *) (ph + 1);
1250 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1252 *ippp = chk->rec.data.TSN_seq;
1254 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1256 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1257 sctp_abort_an_association(stcb->sctp_ep,
1258 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1263 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1264 SCTP_DATA_MIDDLE_FRAG ||
1265 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1266 SCTP_DATA_LAST_FRAG) {
1268 * Insert chk CAN be MIDDLE or FIRST NOT
1271 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1272 SCTP_DATA_LAST_FRAG) {
1273 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1274 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1275 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1276 0, M_DONTWAIT, 1, MT_DATA);
1278 struct sctp_paramhdr *ph;
1281 SCTP_BUF_LEN(oper) =
1282 sizeof(struct sctp_paramhdr) +
1283 (3 * sizeof(uint32_t));
1285 struct sctp_paramhdr *);
1287 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1289 htons(SCTP_BUF_LEN(oper));
1290 ippp = (uint32_t *) (ph + 1);
1291 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1293 *ippp = chk->rec.data.TSN_seq;
1295 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1298 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1299 sctp_abort_an_association(stcb->sctp_ep,
1300 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1305 if (chk->rec.data.stream_number !=
1306 next->rec.data.stream_number) {
1308 * Huh, need the correct STR here,
1309 * they must be the same.
1311 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1312 chk->rec.data.stream_number,
1313 next->rec.data.stream_number);
1314 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1315 0, M_DONTWAIT, 1, MT_DATA);
1317 struct sctp_paramhdr *ph;
1320 SCTP_BUF_LEN(oper) =
1321 sizeof(struct sctp_paramhdr) +
1322 (3 * sizeof(uint32_t));
1324 struct sctp_paramhdr *);
1326 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1328 htons(SCTP_BUF_LEN(oper));
1329 ippp = (uint32_t *) (ph + 1);
1330 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1332 *ippp = chk->rec.data.TSN_seq;
1334 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1337 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1338 sctp_abort_an_association(stcb->sctp_ep,
1339 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1344 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1345 chk->rec.data.stream_seq !=
1346 next->rec.data.stream_seq) {
1348 * Huh, need the correct STR here,
1349 * they must be the same.
1351 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1352 chk->rec.data.stream_seq,
1353 next->rec.data.stream_seq);
1354 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1355 0, M_DONTWAIT, 1, MT_DATA);
1357 struct sctp_paramhdr *ph;
1360 SCTP_BUF_LEN(oper) =
1361 sizeof(struct sctp_paramhdr) +
1362 (3 * sizeof(uint32_t));
1364 struct sctp_paramhdr *);
1366 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1368 htons(SCTP_BUF_LEN(oper));
1369 ippp = (uint32_t *) (ph + 1);
1370 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1372 *ippp = chk->rec.data.TSN_seq;
1374 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1376 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1377 sctp_abort_an_association(stcb->sctp_ep,
1378 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1386 /* Do we need to do some delivery? check */
1387 sctp_deliver_reasm_check(stcb, asoc);
/*
 * NOTE(review): this region is a mangled extract -- the integer at the
 * start of each line is a stray source line number and many interior
 * lines are missing, so braces and return statements do not balance as
 * shown.  Comments below describe only what the visible code shows.
 */
1391 * This is an unfortunate routine. It checks to make sure a evil guy is not
1392 * stuffing us full of bad packet fragments. A broken peer could also do this
1393 * but this is doubtful. It is to bad I must worry about evil crackers sigh
/*
 * Decide whether TSN_seq would have to live on the reassembly queue,
 * i.e. whether it is adjacent (by serial-number arithmetic) to an
 * existing fragment in a way that contradicts its fragment flags.
 * Presumably returns non-zero when the TSN belongs to reassembly --
 * return statements are among the missing lines; confirm against the
 * full source.
 */
1397 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1400 struct sctp_tmit_chunk *at;
/* Walk every fragment currently queued for reassembly. */
1403 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1404 if (compare_with_wrap(TSN_seq,
1405 at->rec.data.TSN_seq, MAX_TSN)) {
1406 /* is it one bigger? */
1407 tsn_est = at->rec.data.TSN_seq + 1;
1408 if (tsn_est == TSN_seq) {
1409 /* yep. It better be a last then */
1410 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1411 SCTP_DATA_LAST_FRAG) {
1413 * Ok this guy belongs next to a guy
1414 * that is NOT last, it should be a
1415 * middle/last, not a complete
1421 * This guy is ok since its a LAST
1422 * and the new chunk is a fully
1423 * self- contained one.
1428 } else if (TSN_seq == at->rec.data.TSN_seq) {
1429 /* Software error since I have a dup? */
1433 * Ok, 'at' is larger than new chunk but does it
1434 * need to be right before it.
/* New TSN sits immediately below 'at': 'at' must then be a FIRST fragment. */
1436 tsn_est = TSN_seq + 1;
1437 if (tsn_est == at->rec.data.TSN_seq) {
1438 /* Yep, It better be a first */
1439 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1440 SCTP_DATA_FIRST_FRAG) {
/*
 * NOTE(review): mangled extract -- leading integers on each line are
 * stray source line numbers and many interior lines are missing, so
 * control flow does not balance as shown.  Comments added below only
 * state what the visible lines establish; anything inferred is hedged.
 *
 * sctp_process_a_data_chunk() -- validate and enqueue one incoming
 * DATA chunk: duplicate/window checks, stream-id validation, express
 * (fast-path) delivery, PD-API append, or hand-off to the stream /
 * reassembly queues.  Outputs: *high_tsn is raised to this TSN when
 * larger; *abort_flag / *break_flag are presumably set on the abort /
 * overload paths (setters are among the missing lines -- confirm).
 */
1453 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1454 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1455 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1456 int *break_flag, int last_chunk)
1458 /* Process a data chunk */
1459 /* struct sctp_tmit_chunk *chk; */
1460 struct sctp_tmit_chunk *chk;
1464 int need_reasm_check = 0;
1465 uint16_t strmno, strmseq;
1467 struct sctp_queued_to_read *control;
1469 uint32_t protocol_id;
1470 uint8_t chunk_flags;
1471 struct sctp_stream_reset_list *liste;
/* Pull TSN/flags out of the chunk header (network byte order on the wire). */
1474 tsn = ntohl(ch->dp.tsn);
1475 chunk_flags = ch->ch.chunk_flags;
1476 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1477 asoc->send_sack = 1;
1479 protocol_id = ch->dp.protocol_id;
1480 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1482 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1487 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* TSN at or below the cumulative ack point: duplicate, record for the next SACK. */
1488 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1489 asoc->cumulative_tsn == tsn) {
1490 /* It is a duplicate */
1491 SCTP_STAT_INCR(sctps_recvdupdata);
1492 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1493 /* Record a dup for the next outbound sack */
1494 asoc->dup_tsns[asoc->numduptsns] = tsn;
1497 asoc->send_sack = 1;
1500 /* Calculate the number of TSN's between the base and this TSN */
1501 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1502 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1503 /* Can't hold the bit in the mapping at max array, toss it */
/* Grow the mapping array on demand when the gap exceeds its current bit span. */
1506 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1507 SCTP_TCB_LOCK_ASSERT(stcb);
1508 if (sctp_expand_mapping_array(asoc, gap)) {
1509 /* Can't expand, drop it */
1513 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1516 /* See if we have received this one already */
1517 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1518 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1519 SCTP_STAT_INCR(sctps_recvdupdata);
1520 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1521 /* Record a dup for the next outbound sack */
1522 asoc->dup_tsns[asoc->numduptsns] = tsn;
1525 asoc->send_sack = 1;
1529 * Check to see about the GONE flag, duplicates would cause a sack
1530 * to be sent up above
1532 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1533 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1534 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1537 * wait a minute, this guy is gone, there is no longer a
1538 * receiver. Send peer an ABORT!
1540 struct mbuf *op_err;
1542 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1543 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1548 * Now before going further we see if there is room. If NOT then we
1549 * MAY let one through only IF this TSN is the one we are waiting
1550 * for on a partial delivery API.
1553 /* now do the tests */
1554 if (((asoc->cnt_on_all_streams +
1555 asoc->cnt_on_reasm_queue +
1556 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1557 (((int)asoc->my_rwnd) <= 0)) {
1559 * When we have NO room in the rwnd we check to make sure
1560 * the reader is doing its job...
1562 if (stcb->sctp_socket->so_rcv.sb_cc) {
1563 /* some to read, wake-up */
/* Apple/lock-testing builds must take the socket lock before the wakeup,
 * dropping the TCB lock around it and re-validating the assoc afterwards. */
1564 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1567 so = SCTP_INP_SO(stcb->sctp_ep);
1568 atomic_add_int(&stcb->asoc.refcnt, 1);
1569 SCTP_TCB_UNLOCK(stcb);
1570 SCTP_SOCKET_LOCK(so, 1);
1571 SCTP_TCB_LOCK(stcb);
1572 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1573 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1574 /* assoc was freed while we were unlocked */
1575 SCTP_SOCKET_UNLOCK(so, 1);
1579 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1580 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1581 SCTP_SOCKET_UNLOCK(so, 1);
1584 /* now is it in the mapping array of what we have accepted? */
1585 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1586 compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1587 /* Nope not in the valid range dump it */
1588 sctp_set_rwnd(stcb, asoc);
1589 if ((asoc->cnt_on_all_streams +
1590 asoc->cnt_on_reasm_queue +
1591 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1592 SCTP_STAT_INCR(sctps_datadropchklmt);
1594 SCTP_STAT_INCR(sctps_datadroprwnd);
/* Stream-id validation: an out-of-range sid queues an INVALID_STREAM
 * error cause (two back-to-back param headers) but the TSN is still
 * marked received below so the peer does not retransmit forever. */
1601 strmno = ntohs(ch->dp.stream_id);
1602 if (strmno >= asoc->streamincnt) {
1603 struct sctp_paramhdr *phdr;
1606 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1607 0, M_DONTWAIT, 1, MT_DATA);
1609 /* add some space up front so prepend will work well */
1610 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1611 phdr = mtod(mb, struct sctp_paramhdr *);
1613 * Error causes are just param's and this one has
1614 * two back to back phdr, one with the error type
1615 * and size, the other with the streamid and a rsvd
1617 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1618 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1619 phdr->param_length =
1620 htons(sizeof(struct sctp_paramhdr) * 2);
1622 /* We insert the stream in the type field */
1623 phdr->param_type = ch->dp.stream_id;
1624 /* And set the length to 0 for the rsvd field */
1625 phdr->param_length = 0;
1626 sctp_queue_op_err(stcb, mb);
1628 SCTP_STAT_INCR(sctps_badsid);
1629 SCTP_TCB_LOCK_ASSERT(stcb);
1630 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1631 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1632 asoc->highest_tsn_inside_nr_map = tsn;
1634 if (tsn == (asoc->cumulative_tsn + 1)) {
1635 /* Update cum-ack */
1636 asoc->cumulative_tsn = tsn;
1641 * Before we continue lets validate that we are not being fooled by
1642 * an evil attacker. We can only have 4k chunks based on our TSN
1643 * spread allowed by the mapping array 512 * 8 bits, so there is no
1644 * way our stream sequence numbers could have wrapped. We of course
1645 * only validate the FIRST fragment so the bit must be set.
1647 strmseq = ntohs(ch->dp.stream_sequence);
1648 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Debug ring buffer of inbound TSNs (compiled in only with SCTP_ASOCLOG_OF_TSNS). */
1649 SCTP_TCB_LOCK_ASSERT(stcb);
1650 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1651 asoc->tsn_in_at = 0;
1652 asoc->tsn_in_wrapped = 1;
1654 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1655 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1656 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1657 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1658 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1659 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1660 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1661 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
/* Ordered FIRST fragment whose stream-seq is at/behind the last delivered
 * sseq (and no stream reset pending) is a protocol violation: abort with
 * PROTOCOL_VIOLATION carrying location SCTP_LOC_14 plus strmno/strmseq. */
1664 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1665 (TAILQ_EMPTY(&asoc->resetHead)) &&
1666 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1667 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1668 strmseq, MAX_SEQ) ||
1669 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1670 /* The incoming sseq is behind where we last delivered? */
1671 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1672 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1673 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1674 0, M_DONTWAIT, 1, MT_DATA);
1676 struct sctp_paramhdr *ph;
1679 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1680 (3 * sizeof(uint32_t));
1681 ph = mtod(oper, struct sctp_paramhdr *);
1682 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1683 ph->param_length = htons(SCTP_BUF_LEN(oper));
1684 ippp = (uint32_t *) (ph + 1);
1685 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1689 *ippp = ((strmno << 16) | strmseq);
1692 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1693 sctp_abort_an_association(stcb->sctp_ep, stcb,
1694 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1698 /************************************
1699 * From here down we may find ch-> invalid
1700 * so its a good idea NOT to use it.
1701 *************************************/
/* Detach the payload: copy it out of *m unless this is the last chunk of
 * the packet, in which case the tail of the mbuf chain is stolen and
 * trimmed to exactly the_len bytes. */
1703 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1704 if (last_chunk == 0) {
1705 dmbuf = SCTP_M_COPYM(*m,
1706 (offset + sizeof(struct sctp_data_chunk)),
1707 the_len, M_DONTWAIT);
1708 #ifdef SCTP_MBUF_LOGGING
1709 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1714 if (SCTP_BUF_IS_EXTENDED(mat)) {
1715 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1717 mat = SCTP_BUF_NEXT(mat);
1722 /* We can steal the last chunk */
1726 /* lop off the top part */
1727 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1728 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1729 l_len = SCTP_BUF_LEN(dmbuf);
1732 * need to count up the size hopefully does not hit
1740 l_len += SCTP_BUF_LEN(lat);
1741 lat = SCTP_BUF_NEXT(lat);
1744 if (l_len > the_len) {
1745 /* Trim the end round bytes off too */
1746 m_adj(dmbuf, -(l_len - the_len));
1749 if (dmbuf == NULL) {
1750 SCTP_STAT_INCR(sctps_nomem);
/* Express (fast-path) delivery: complete, un-fragmented message with no
 * PD-API active, no stream reset pending, and (if ordered) next in
 * sequence with an empty stream queue -- push straight to the socket. */
1753 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1754 asoc->fragmented_delivery_inprogress == 0 &&
1755 TAILQ_EMPTY(&asoc->resetHead) &&
1757 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1758 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1759 /* Candidate for express delivery */
1761 * Its not fragmented, No PD-API is up, Nothing in the
1762 * delivery queue, Its un-ordered OR ordered and the next to
1763 * deliver AND nothing else is stuck on the stream queue,
1764 * And there is room for it in the socket buffer. Lets just
1765 * stuff it up the buffer....
1768 /* It would be nice to avoid this copy if we could :< */
1769 sctp_alloc_a_readq(stcb, control);
1770 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1776 if (control == NULL) {
1777 goto failed_express_del;
1779 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1780 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1781 asoc->highest_tsn_inside_nr_map = tsn;
1783 sctp_add_to_readq(stcb->sctp_ep, stcb,
1784 control, &stcb->sctp_socket->so_rcv,
1785 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1787 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1788 /* for ordered, bump what we delivered */
1789 asoc->strmin[strmno].last_sequence_delivered++;
1791 SCTP_STAT_INCR(sctps_recvexpress);
1792 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1793 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1794 SCTP_STR_LOG_FROM_EXPRS_DEL);
1798 goto finish_express_del;
1801 /* If we reach here this is a new chunk */
1804 /* Express for fragmented delivery? */
/* PD-API express append: the chunk continues the partial-delivery message
 * currently open on this stream/sseq, so append to its read entry. */
1805 if ((asoc->fragmented_delivery_inprogress) &&
1806 (stcb->asoc.control_pdapi) &&
1807 (asoc->str_of_pdapi == strmno) &&
1808 (asoc->ssn_of_pdapi == strmseq)
1810 control = stcb->asoc.control_pdapi;
1811 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1812 /* Can't be another first? */
1813 goto failed_pdapi_express_del;
1815 if (tsn == (control->sinfo_tsn + 1)) {
1816 /* Yep, we can add it on */
1820 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1823 cumack = asoc->cumulative_tsn;
1824 if ((cumack + 1) == tsn)
1827 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1829 &stcb->sctp_socket->so_rcv)) {
1830 SCTP_PRINTF("Append fails end:%d\n", end);
1831 goto failed_pdapi_express_del;
1833 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1834 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1835 asoc->highest_tsn_inside_nr_map = tsn;
1837 SCTP_STAT_INCR(sctps_recvexpressm);
1838 control->sinfo_tsn = tsn;
1839 asoc->tsn_last_delivered = tsn;
1840 asoc->fragment_flags = chunk_flags;
1841 asoc->tsn_of_pdapi_last_delivered = tsn;
1842 asoc->last_flags_delivered = chunk_flags;
1843 asoc->last_strm_seq_delivered = strmseq;
1844 asoc->last_strm_no_delivered = strmno;
1846 /* clean up the flags and such */
1847 asoc->fragmented_delivery_inprogress = 0;
1848 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1849 asoc->strmin[strmno].last_sequence_delivered++;
1851 stcb->asoc.control_pdapi = NULL;
1852 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1854 * There could be another message
1857 need_reasm_check = 1;
1861 goto finish_express_del;
1864 failed_pdapi_express_del:
/* Slow path: mark the TSN in the appropriate mapping array (nr_ when
 * draining is disabled, regular otherwise) and track the highest TSN. */
1866 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1867 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1868 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1869 asoc->highest_tsn_inside_nr_map = tsn;
1872 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1873 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1874 asoc->highest_tsn_inside_map = tsn;
/* Fragmented chunk: allocate a sctp_tmit_chunk destined for reassembly. */
1877 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1878 sctp_alloc_a_chunk(stcb, chk);
1880 /* No memory so we drop the chunk */
1881 SCTP_STAT_INCR(sctps_nomem);
1882 if (last_chunk == 0) {
1883 /* we copied it, free the copy */
1884 sctp_m_freem(dmbuf);
1888 chk->rec.data.TSN_seq = tsn;
1889 chk->no_fr_allowed = 0;
1890 chk->rec.data.stream_seq = strmseq;
1891 chk->rec.data.stream_number = strmno;
1892 chk->rec.data.payloadtype = protocol_id;
1893 chk->rec.data.context = stcb->asoc.context;
1894 chk->rec.data.doing_fast_retransmit = 0;
1895 chk->rec.data.rcv_flags = chunk_flags;
1897 chk->send_size = the_len;
1899 atomic_add_int(&net->ref_count, 1);
/* Presumably the non-fragmented branch: build a read-queue entry instead
 * (the else arm itself is among the missing lines -- confirm). */
1902 sctp_alloc_a_readq(stcb, control);
1903 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1909 if (control == NULL) {
1910 /* No memory so we drop the chunk */
1911 SCTP_STAT_INCR(sctps_nomem);
1912 if (last_chunk == 0) {
1913 /* we copied it, free the copy */
1914 sctp_m_freem(dmbuf);
1918 control->length = the_len;
1921 /* Mark it as received */
1922 /* Now queue it where it belongs */
1923 if (control != NULL) {
1924 /* First a sanity check */
1925 if (asoc->fragmented_delivery_inprogress) {
1927 * Ok, we have a fragmented delivery in progress if
1928 * this chunk is next to deliver OR belongs in our
1929 * view to the reassembly, the peer is evil or
1932 uint32_t estimate_tsn;
1934 estimate_tsn = asoc->tsn_last_delivered + 1;
/* Complete message arriving where the next PD-API fragment is expected:
 * free the read entry and abort with PROTOCOL_VIOLATION (SCTP_LOC_15). */
1935 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1936 (estimate_tsn == control->sinfo_tsn)) {
1937 /* Evil/Broke peer */
1938 sctp_m_freem(control->data);
1939 control->data = NULL;
1940 if (control->whoFrom) {
1941 sctp_free_remote_addr(control->whoFrom);
1942 control->whoFrom = NULL;
1944 sctp_free_a_readq(stcb, control);
1945 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1946 0, M_DONTWAIT, 1, MT_DATA);
1948 struct sctp_paramhdr *ph;
1951 SCTP_BUF_LEN(oper) =
1952 sizeof(struct sctp_paramhdr) +
1953 (3 * sizeof(uint32_t));
1954 ph = mtod(oper, struct sctp_paramhdr *);
1956 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1957 ph->param_length = htons(SCTP_BUF_LEN(oper));
1958 ippp = (uint32_t *) (ph + 1);
1959 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1963 *ippp = ((strmno << 16) | strmseq);
1965 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1966 sctp_abort_an_association(stcb->sctp_ep, stcb,
1967 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* Complete message whose TSN belongs inside the reassembly queue:
 * also a violation, aborted with SCTP_LOC_16. */
1972 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1973 sctp_m_freem(control->data);
1974 control->data = NULL;
1975 if (control->whoFrom) {
1976 sctp_free_remote_addr(control->whoFrom);
1977 control->whoFrom = NULL;
1979 sctp_free_a_readq(stcb, control);
1981 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1982 0, M_DONTWAIT, 1, MT_DATA);
1984 struct sctp_paramhdr *ph;
1987 SCTP_BUF_LEN(oper) =
1988 sizeof(struct sctp_paramhdr) +
1989 (3 * sizeof(uint32_t));
1991 struct sctp_paramhdr *);
1993 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1995 htons(SCTP_BUF_LEN(oper));
1996 ippp = (uint32_t *) (ph + 1);
1997 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2001 *ippp = ((strmno << 16) | strmseq);
2003 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2004 sctp_abort_an_association(stcb->sctp_ep,
2005 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2012 /* No PDAPI running */
2013 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2015 * Reassembly queue is NOT empty validate
2016 * that this tsn does not need to be in
2017 * reasembly queue. If it does then our peer
2018 * is broken or evil.
2020 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2021 sctp_m_freem(control->data);
2022 control->data = NULL;
2023 if (control->whoFrom) {
2024 sctp_free_remote_addr(control->whoFrom);
2025 control->whoFrom = NULL;
2027 sctp_free_a_readq(stcb, control);
2028 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2029 0, M_DONTWAIT, 1, MT_DATA);
2031 struct sctp_paramhdr *ph;
2034 SCTP_BUF_LEN(oper) =
2035 sizeof(struct sctp_paramhdr) +
2036 (3 * sizeof(uint32_t));
2038 struct sctp_paramhdr *);
2040 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2042 htons(SCTP_BUF_LEN(oper));
2043 ippp = (uint32_t *) (ph + 1);
2044 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2048 *ippp = ((strmno << 16) | strmseq);
2050 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2051 sctp_abort_an_association(stcb->sctp_ep,
2052 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2059 /* ok, if we reach here we have passed the sanity checks */
2060 if (chunk_flags & SCTP_DATA_UNORDERED) {
2061 /* queue directly into socket buffer */
2062 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2063 sctp_add_to_readq(stcb->sctp_ep, stcb,
2065 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2068 * Special check for when streams are resetting. We
2069 * could be more smart about this and check the
2070 * actual stream to see if it is not being reset..
2071 * that way we would not create a HOLB when amongst
2072 * streams being reset and those not being reset.
2074 * We take complete messages that have a stream reset
2075 * intervening (aka the TSN is after where our
2076 * cum-ack needs to be) off and put them on a
2077 * pending_reply_queue. The reassembly ones we do
2078 * not have to worry about since they are all sorted
2079 * and proceessed by TSN order. It is only the
2080 * singletons I must worry about.
2082 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2083 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2086 * yep its past where we need to reset... go
2087 * ahead and queue it.
2089 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2091 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2093 struct sctp_queued_to_read *ctlOn;
2094 unsigned char inserted = 0;
/* Keep pending_reply_queue sorted by TSN: walk past smaller TSNs, insert before the first larger one. */
2096 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2098 if (compare_with_wrap(control->sinfo_tsn,
2099 ctlOn->sinfo_tsn, MAX_TSN)) {
2100 ctlOn = TAILQ_NEXT(ctlOn, next);
2103 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2108 if (inserted == 0) {
2110 * must be put at end, use
2111 * prevP (all setup from
2112 * loop) to setup nextP.
2114 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2118 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2125 /* Into the re-assembly queue */
2126 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2129 * the assoc is now gone and chk was put onto the
2130 * reasm queue, which has all been freed.
2137 if (tsn == (asoc->cumulative_tsn + 1)) {
2138 /* Update cum-ack */
2139 asoc->cumulative_tsn = tsn;
2145 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2147 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2149 SCTP_STAT_INCR(sctps_recvdata);
2150 /* Set it present please */
2151 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2152 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2154 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2155 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2156 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2158 /* check the special flag for stream resets */
2159 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2160 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2161 (asoc->cumulative_tsn == liste->tsn))
2164 * we have finished working through the backlogged TSN's now
2165 * time to reset streams. 1: call reset function. 2: free
2166 * pending_reply space 3: distribute any chunks in
2167 * pending_reply_queue.
2169 struct sctp_queued_to_read *ctl;
2171 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2172 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2173 SCTP_FREE(liste, SCTP_M_STRESET);
2174 /* sa_ignore FREED_MEMORY */
2175 liste = TAILQ_FIRST(&asoc->resetHead);
2176 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2177 if (ctl && (liste == NULL)) {
2178 /* All can be removed */
2180 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2181 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2185 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2188 /* more than one in queue */
2189 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2191 * if ctl->sinfo_tsn is <= liste->tsn we can
2192 * process it which is the NOT of
2193 * ctl->sinfo_tsn > liste->tsn
2195 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2196 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2200 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2204 * Now service re-assembly to pick up anything that has been
2205 * held on reassembly queue?
2207 sctp_deliver_reasm_check(stcb, asoc);
2208 need_reasm_check = 0;
2210 if (need_reasm_check) {
2211 /* Another one waits ? */
2212 sctp_deliver_reasm_check(stcb, asoc);
/*
 * Lookup table indexed by one byte of the (OR'd) mapping arrays:
 * sctp_map_lookup_tab[v] is the number of contiguous 1-bits in v
 * counting up from the least-significant bit (0x01 -> 1, 0x03 -> 2,
 * 0x07 -> 3, ..., 0xFF -> 8; any value with bit 0 clear -> 0).
 * sctp_slide_mapping_arrays() adds this to its running count when it
 * meets the first byte that is not 0xFF, to locate the exact bit
 * position of the cumulative-TSN point.
 */
2217 int8_t sctp_map_lookup_tab[256] = {
2218 0, 1, 0, 2, 0, 1, 0, 3,
2219 0, 1, 0, 2, 0, 1, 0, 4,
2220 0, 1, 0, 2, 0, 1, 0, 3,
2221 0, 1, 0, 2, 0, 1, 0, 5,
2222 0, 1, 0, 2, 0, 1, 0, 3,
2223 0, 1, 0, 2, 0, 1, 0, 4,
2224 0, 1, 0, 2, 0, 1, 0, 3,
2225 0, 1, 0, 2, 0, 1, 0, 6,
2226 0, 1, 0, 2, 0, 1, 0, 3,
2227 0, 1, 0, 2, 0, 1, 0, 4,
2228 0, 1, 0, 2, 0, 1, 0, 3,
2229 0, 1, 0, 2, 0, 1, 0, 5,
2230 0, 1, 0, 2, 0, 1, 0, 3,
2231 0, 1, 0, 2, 0, 1, 0, 4,
2232 0, 1, 0, 2, 0, 1, 0, 3,
2233 0, 1, 0, 2, 0, 1, 0, 7,
2234 0, 1, 0, 2, 0, 1, 0, 3,
2235 0, 1, 0, 2, 0, 1, 0, 4,
2236 0, 1, 0, 2, 0, 1, 0, 3,
2237 0, 1, 0, 2, 0, 1, 0, 5,
2238 0, 1, 0, 2, 0, 1, 0, 3,
2239 0, 1, 0, 2, 0, 1, 0, 4,
2240 0, 1, 0, 2, 0, 1, 0, 3,
2241 0, 1, 0, 2, 0, 1, 0, 6,
2242 0, 1, 0, 2, 0, 1, 0, 3,
2243 0, 1, 0, 2, 0, 1, 0, 4,
2244 0, 1, 0, 2, 0, 1, 0, 3,
2245 0, 1, 0, 2, 0, 1, 0, 5,
2246 0, 1, 0, 2, 0, 1, 0, 3,
2247 0, 1, 0, 2, 0, 1, 0, 4,
2248 0, 1, 0, 2, 0, 1, 0, 3,
2249 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() -- advance the receiver's TSN mapping state.
 * Recomputes the cumulative TSN by scanning the byte-wise OR of
 * mapping_array and nr_mapping_array, then either clears both arrays
 * outright (everything up to the highest TSN is acked) or slides their
 * contents down so the base TSN moves forward.
 * NOTE(review): this excerpt embeds original line numbers and omits many
 * intermediate lines (gaps in the numbering); code left byte-identical.
 */
2254 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2257 * Now we also need to check the mapping array in a couple of ways.
2258 * 1) Did we move the cum-ack point?
2260 * When you first glance at this you might think that all entries that
2261 * make up the postion of the cum-ack would be in the nr-mapping
2262 * array only.. i.e. things up to the cum-ack are always
2263 * deliverable. Thats true with one exception, when its a fragmented
2264 * message we may not deliver the data until some threshold (or all
2265 * of it) is in place. So we must OR the nr_mapping_array and
2266 * mapping_array to get a true picture of the cum-ack.
2268 struct sctp_association *asoc;
2271 int slide_from, slide_end, lgap, distance;
2272 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot pre-slide state; used only for SCTP_MAP_LOGGING below. */
2277 old_cumack = asoc->cumulative_tsn;
2278 old_base = asoc->mapping_array_base_tsn;
2279 old_highest = asoc->highest_tsn_inside_map;
2281 * We could probably improve this a small bit by calculating the
2282 * offset of the current cum-ack as the starting point.
/*
 * Scan for the first byte that is not fully acked in the OR of the two
 * maps; sctp_map_lookup_tab[val] gives the count of leading set bits.
 */
2285 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2286 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2290 /* there is a 0 bit */
2291 at += sctp_map_lookup_tab[val];
2295 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: the recomputed cum-ack must not exceed both recorded
 * highest-TSN markers. Panics under INVARIANTS (presumably -- the
 * #ifdef lines are not visible in this excerpt), otherwise logs,
 * dumps the map, and clamps the highest-TSN markers to recover.
 */
2297 if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2298 compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2300 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2301 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2303 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2304 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2305 sctp_print_mapping_array(asoc);
2306 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2307 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2309 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2310 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max(highest_tsn_inside_nr_map, highest_tsn_inside_map) */
2313 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2314 asoc->highest_tsn_inside_map,
2316 highest_tsn = asoc->highest_tsn_inside_nr_map;
2318 highest_tsn = asoc->highest_tsn_inside_map;
2320 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2321 /* The complete array was completed by a single FR */
2322 /* highest becomes the cum-ack */
2330 /* clear the array */
/* Round the acked-bit count up to whole bytes before memset. */
2331 clr = ((at + 7) >> 3);
2332 if (clr > asoc->mapping_array_size) {
2333 clr = asoc->mapping_array_size;
2335 memset(asoc->mapping_array, 0, clr);
2336 memset(asoc->nr_mapping_array, 0, clr);
/* Debug-only verification that the clear left no stray bits. */
2338 for (i = 0; i < asoc->mapping_array_size; i++) {
2339 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2340 printf("Error Mapping array's not clean at clear\n");
2341 sctp_print_mapping_array(asoc);
2345 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2346 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2347 } else if (at >= 8) {
2348 /* we can slide the mapping array down */
2349 /* slide_from holds where we hit the first NON 0xff byte */
2352 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn relative to the array base. */
2355 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2356 slide_end = (lgap >> 3);
2357 if (slide_end < slide_from) {
2358 sctp_print_mapping_array(asoc);
2360 panic("impossible slide");
2362 printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2363 lgap, slide_end, slide_from, at);
2367 if (slide_end > asoc->mapping_array_size) {
2369 panic("would overrun buffer");
2371 printf("Gak, would have overrun map end:%d slide_end:%d\n",
2372 asoc->mapping_array_size, slide_end);
2373 slide_end = asoc->mapping_array_size;
2376 distance = (slide_end - slide_from) + 1;
2377 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2378 sctp_log_map(old_base, old_cumack, old_highest,
2379 SCTP_MAP_PREPARE_SLIDE);
2380 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2381 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2383 if (distance + slide_from > asoc->mapping_array_size ||
2386 * Here we do NOT slide forward the array so that
2387 * hopefully when more data comes in to fill it up
2388 * we will be able to slide it forward. Really I
2389 * don't think this should happen :-0
2392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2393 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2394 (uint32_t) asoc->mapping_array_size,
2395 SCTP_MAP_SLIDE_NONE);
/* Move the live bytes down to index 0, then zero the vacated tail. */
2400 for (ii = 0; ii < distance; ii++) {
2401 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2402 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2405 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2406 asoc->mapping_array[ii] = 0;
2407 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "map is empty" invariant (highest == base - 1) intact when
 * the base moves forward by slide_from bytes (8 TSNs per byte).
 */
2409 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2410 asoc->highest_tsn_inside_map += (slide_from << 3);
2412 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2413 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2415 asoc->mapping_array_base_tsn += (slide_from << 3);
2416 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2417 sctp_log_map(asoc->mapping_array_base_tsn,
2418 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2419 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() -- decide whether to send a SACK immediately or
 * (re)start the delayed-ack timer, based on gap state, duplicate TSNs,
 * the delayed-ack setting, and the SHUTDOWN-SENT special case.
 * NOTE(review): excerpt with embedded original line numbers and missing
 * intermediate lines; code left byte-identical.
 */
2427 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2429 struct sctp_association *asoc;
2430 uint32_t highest_tsn;
/* highest_tsn = max of the two highest-TSN markers (wrap-aware). */
2433 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2434 asoc->highest_tsn_inside_map,
2436 highest_tsn = asoc->highest_tsn_inside_nr_map;
2438 highest_tsn = asoc->highest_tsn_inside_map;
2442 * Now we need to see if we need to queue a sack or just start the
2443 * timer (if allowed).
2445 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2447 * Ok special case, in SHUTDOWN-SENT case. here we maker
2448 * sure SACK timer is off and instead send a SHUTDOWN and a
/* (presumably "... and a SACK" -- the rest of this comment is not in the excerpt) */
2451 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2452 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2453 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2455 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2456 sctp_send_sack(stcb);
2460 /* is there a gap now ? */
2461 is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2464 * CMT DAC algorithm: increase number of packets received
2467 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any one of these conditions forces an immediate SACK (or DAC delay). */
2469 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2471 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2473 (stcb->asoc.numduptsns) || /* we have dup's */
2474 (is_a_gap) || /* is still a gap */
2475 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2476 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2479 if ((stcb->asoc.sctp_cmt_on_off == 1) &&
2480 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2481 (stcb->asoc.send_sack == 0) &&
2482 (stcb->asoc.numduptsns == 0) &&
2483 (stcb->asoc.delayed_ack) &&
2484 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2487 * CMT DAC algorithm: With CMT, delay acks
2488 * even in the face of
2490 * reordering. Therefore, if acks that do not
2491 * have to be sent because of the above
2492 * reasons, will be delayed. That is, acks
2493 * that would have been sent due to gap
2494 * reports will be delayed with DAC. Start
2495 * the delayed ack timer.
2497 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2498 stcb->sctp_ep, stcb, NULL);
2501 * Ok we must build a SACK since the timer
2502 * is pending, we got our first packet OR
2503 * there are gaps or duplicates.
2505 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2506 sctp_send_sack(stcb);
/* No forcing condition: just arm the delayed-ack timer if idle. */
2509 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2510 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2511 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() -- drive delivery from the reassembly queue.
 * Continues an in-progress partial delivery (PD-API) if one exists;
 * otherwise checks whether the first fragment chain on the reassembly
 * queue can start a new partial delivery (whole message present, or
 * enough data to reach the partial-delivery point).
 * NOTE(review): excerpt with embedded original line numbers and missing
 * intermediate lines; code left byte-identical.
 */
2518 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2520 struct sctp_tmit_chunk *chk;
2521 uint32_t tsize, pd_point;
2524 if (asoc->fragmented_delivery_inprogress) {
2525 sctp_service_reassembly(stcb, asoc);
2527 /* Can we proceed further, i.e. the PD-API is complete */
2528 if (asoc->fragmented_delivery_inprogress) {
2533 * Now is there some other chunk I can deliver from the reassembly
2537 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the reassembly accounting and bail out. */
2539 asoc->size_on_reasm_queue = 0;
2540 asoc->cnt_on_reasm_queue = 0;
2543 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2544 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2545 ((nxt_todel == chk->rec.data.stream_seq) ||
2546 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2548 * Yep the first one is here. We setup to start reception,
2549 * by backing down the TSN just in case we can't deliver.
2553 * Before we start though either all of the message should
2554 * be here or the socket buffer max or nothing on the
2555 * delivery queue and something can be delivered.
2557 if (stcb->sctp_socket) {
2558 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2559 stcb->sctp_ep->partial_delivery_point);
2561 pd_point = stcb->sctp_ep->partial_delivery_point;
2563 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Record PD-API context so later fragments join this delivery. */
2564 asoc->fragmented_delivery_inprogress = 1;
2565 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2566 asoc->str_of_pdapi = chk->rec.data.stream_number;
2567 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2568 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2569 asoc->fragment_flags = chk->rec.data.rcv_flags;
2570 sctp_service_reassembly(stcb, asoc);
2571 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() -- walk every chunk in an inbound packet's DATA
 * region: hand SCTP_DATA chunks to sctp_process_a_data_chunk(), ignore
 * (or abort on, per sysctl) known control chunks found out of place,
 * and apply the 0x40/0x80 bit rules to unknown chunk types. Afterwards
 * services the reassembly queue and triggers SACK generation.
 * NOTE(review): excerpt with embedded original line numbers and missing
 * intermediate lines (else-branches, closing braces); code left
 * byte-identical.
 */
2579 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2580 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2581 struct sctp_nets *net, uint32_t * high_tsn)
2583 struct sctp_data_chunk *ch, chunk_buf;
2584 struct sctp_association *asoc;
2585 int num_chunks = 0; /* number of control chunks processed */
2587 int chk_length, break_flag, last_chunk;
2588 int abort_flag = 0, was_a_gap;
2590 uint32_t highest_tsn;
2593 sctp_set_rwnd(stcb, &stcb->asoc);
2596 SCTP_TCB_LOCK_ASSERT(stcb);
/* Capture whether a gap existed before processing, for sctp_sack_check(). */
2598 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map, MAX_TSN)) {
2599 highest_tsn = asoc->highest_tsn_inside_nr_map;
2601 highest_tsn = asoc->highest_tsn_inside_map;
2603 was_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2605 * setup where we got the last DATA packet from for any SACK that
2606 * may need to go out. Don't bump the net. This is done ONLY when a
2607 * chunk is assigned.
2609 asoc->last_data_chunk_from = net;
2612 * Now before we proceed we must figure out if this is a wasted
2613 * cluster... i.e. it is a small packet sent in and yet the driver
2614 * underneath allocated a full cluster for it. If so we must copy it
2615 * to a smaller mbuf and free up the cluster mbuf. This will help
2616 * with cluster starvation. Note for __Panda__ we don't do this
2617 * since it has clusters all the way down to 64 bytes.
2619 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2620 /* we only handle mbufs that are singletons.. not chains */
2621 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2623 /* ok lets see if we can copy the data up */
2626 /* get the pointers and copy */
2627 to = mtod(m, caddr_t *);
2628 from = mtod((*mm), caddr_t *);
2629 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2630 /* copy the length and free up the old */
2631 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2633 /* sucess, back copy */
2636 /* We are in trouble in the mbuf world .. yikes */
2640 /* get pointer to the first chunk header */
2641 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2642 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2647 * process all DATA chunks...
2649 *high_tsn = asoc->cumulative_tsn;
2651 asoc->data_pkts_seen++;
2652 while (stop_proc == 0) {
2653 /* validate chunk length */
2654 chk_length = ntohs(ch->ch.chunk_length);
2655 if (length - *offset < chk_length) {
2656 /* all done, mutulated chunk */
2660 if (ch->ch.chunk_type == SCTP_DATA) {
/* A DATA chunk must carry at least one byte of user payload. */
2661 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2663 * Need to send an abort since we had a
2664 * invalid data chunk.
2666 struct mbuf *op_err;
2668 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2669 0, M_DONTWAIT, 1, MT_DATA);
2672 struct sctp_paramhdr *ph;
/* Build a PROTOCOL_VIOLATION error cause: location code + cum-TSN. */
2675 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2676 (2 * sizeof(uint32_t));
2677 ph = mtod(op_err, struct sctp_paramhdr *);
2679 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2680 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2681 ippp = (uint32_t *) (ph + 1);
2682 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2684 *ippp = asoc->cumulative_tsn;
2687 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2688 sctp_abort_association(inp, stcb, m, iphlen, sh,
2689 op_err, 0, net->port);
2692 #ifdef SCTP_AUDITING_ENABLED
2693 sctp_audit_log(0xB1, 0);
2695 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2700 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2701 chk_length, net, high_tsn, &abort_flag, &break_flag,
2710 * Set because of out of rwnd space and no
2711 * drop rep space left.
2717 /* not a data chunk in the data region */
2718 switch (ch->ch.chunk_type) {
2719 case SCTP_INITIATION:
2720 case SCTP_INITIATION_ACK:
2721 case SCTP_SELECTIVE_ACK:
2722 case SCTP_NR_SELECTIVE_ACK: /* EY */
2723 case SCTP_HEARTBEAT_REQUEST:
2724 case SCTP_HEARTBEAT_ACK:
2725 case SCTP_ABORT_ASSOCIATION:
2727 case SCTP_SHUTDOWN_ACK:
2728 case SCTP_OPERATION_ERROR:
2729 case SCTP_COOKIE_ECHO:
2730 case SCTP_COOKIE_ACK:
2733 case SCTP_SHUTDOWN_COMPLETE:
2734 case SCTP_AUTHENTICATION:
2735 case SCTP_ASCONF_ACK:
2736 case SCTP_PACKET_DROPPED:
2737 case SCTP_STREAM_RESET:
2738 case SCTP_FORWARD_CUM_TSN:
2741 * Now, what do we do with KNOWN chunks that
2742 * are NOT in the right place?
2744 * For now, I do nothing but ignore them. We
2745 * may later want to add sysctl stuff to
2746 * switch out and do either an ABORT() or
2747 * possibly process them.
2749 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2750 struct mbuf *op_err;
2752 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2753 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2758 /* unknown chunk type, use bit rules */
/* Upper bits of the type: 0x40 = report error, 0x80 = skip and continue. */
2759 if (ch->ch.chunk_type & 0x40) {
2760 /* Add a error report to the queue */
2762 struct sctp_paramhdr *phd;
2764 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2766 phd = mtod(merr, struct sctp_paramhdr *);
2768 * We cheat and use param
2769 * type since we did not
2770 * bother to define a error
2771 * cause struct. They are
2772 * the same basic format
2773 * with different names.
2776 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2778 htons(chk_length + sizeof(*phd));
2779 SCTP_BUF_LEN(merr) = sizeof(*phd);
2780 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2781 SCTP_SIZE32(chk_length),
2783 if (SCTP_BUF_NEXT(merr)) {
2784 sctp_queue_op_err(stcb, merr);
2790 if ((ch->ch.chunk_type & 0x80) == 0) {
2791 /* discard the rest of this packet */
2793 } /* else skip this bad chunk and
2796 }; /* switch of chunk type */
/* Advance to the next 32-bit-aligned chunk boundary. */
2798 *offset += SCTP_SIZE32(chk_length);
2799 if ((*offset >= length) || stop_proc) {
2800 /* no more data left in the mbuf chain */
2804 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2805 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2815 * we need to report rwnd overrun drops.
2817 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2821 * Did we get data, if so update the time for auto-close and
2822 * give peer credit for being alive.
2824 SCTP_STAT_INCR(sctps_recvpktwithdata);
2825 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2826 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2827 stcb->asoc.overall_error_count,
2829 SCTP_FROM_SCTP_INDATA,
2832 stcb->asoc.overall_error_count = 0;
2833 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2835 /* now service all of the reassm queue if needed */
2836 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2837 sctp_service_queues(stcb, asoc);
2839 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2840 /* Assure that we ack right away */
2841 stcb->asoc.send_sack = 1;
2843 /* Start a sack timer or QUEUE a SACK for sending */
2844 sctp_sack_check(stcb, was_a_gap, &abort_flag);
/*
 * sctp_process_segment_range() -- mark sent-queue chunks acked by one
 * SACK gap-ack block [last_tsn+frag_strt .. last_tsn+frag_end].
 * Updates CMT pseudo-cumack / SFR / DAC trackers, flight-size and RTO
 * accounting, and (for nr-sacks) frees acked data. Returns wake_him
 * (nonzero when a socket wakeup is warranted -- used for nr-sack only).
 * NOTE(review): excerpt with embedded original line numbers and missing
 * intermediate lines; code left byte-identical.
 */
2852 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2853 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2855 uint32_t * biggest_newly_acked_tsn,
2856 uint32_t * this_sack_lowest_newack,
2859 struct sctp_tmit_chunk *tp1;
2860 unsigned int theTSN;
2861 int j, wake_him = 0, circled = 0;
2863 /* Recover the tp1 we last saw */
2866 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Walk each TSN covered by this gap block (offsets are relative to cum-ack). */
2868 for (j = frag_strt; j <= frag_end; j++) {
2869 theTSN = j + last_tsn;
2871 if (tp1->rec.data.doing_fast_retransmit)
2875 * CMT: CUCv2 algorithm. For each TSN being
2876 * processed from the sent queue, track the
2877 * next expected pseudo-cumack, or
2878 * rtx_pseudo_cumack, if required. Separate
2879 * cumack trackers for first transmissions,
2880 * and retransmissions.
2882 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2883 (tp1->snd_count == 1)) {
2884 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2885 tp1->whoTo->find_pseudo_cumack = 0;
2887 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2888 (tp1->snd_count > 1)) {
2889 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2890 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2892 if (tp1->rec.data.TSN_seq == theTSN) {
2893 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2895 * must be held until
2899 * ECN Nonce: Add the nonce
2900 * value to the sender's
2903 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2905 * If it is less than RESEND, it is
2906 * now no-longer in flight.
2907 * Higher values may already be set
2908 * via previous Gap Ack Blocks...
2909 * i.e. ACKED or RESEND.
2911 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2912 *biggest_newly_acked_tsn, MAX_TSN)) {
2913 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2916 * CMT: SFR algo (and HTNA) - set
2917 * saw_newack to 1 for dest being
2918 * newly acked. update
2919 * this_sack_highest_newack if
2922 if (tp1->rec.data.chunk_was_revoked == 0)
2923 tp1->whoTo->saw_newack = 1;
2925 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2926 tp1->whoTo->this_sack_highest_newack,
2928 tp1->whoTo->this_sack_highest_newack =
2929 tp1->rec.data.TSN_seq;
2932 * CMT DAC algo: also update
2933 * this_sack_lowest_newack
2935 if (*this_sack_lowest_newack == 0) {
2936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2937 sctp_log_sack(*this_sack_lowest_newack,
2939 tp1->rec.data.TSN_seq,
2942 SCTP_LOG_TSN_ACKED);
2944 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2947 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2948 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2949 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2950 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2951 * Separate pseudo_cumack trackers for first transmissions and
2954 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2955 if (tp1->rec.data.chunk_was_revoked == 0) {
2956 tp1->whoTo->new_pseudo_cumack = 1;
2958 tp1->whoTo->find_pseudo_cumack = 1;
2960 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2961 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2963 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2964 if (tp1->rec.data.chunk_was_revoked == 0) {
2965 tp1->whoTo->new_pseudo_cumack = 1;
2967 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2969 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2970 sctp_log_sack(*biggest_newly_acked_tsn,
2972 tp1->rec.data.TSN_seq,
2975 SCTP_LOG_TSN_ACKED);
2977 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2978 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2979 tp1->whoTo->flight_size,
2981 (uintptr_t) tp1->whoTo,
2982 tp1->rec.data.TSN_seq);
/* Newly gap-acked: remove from flight and credit net_ack for cwnd growth. */
2984 sctp_flight_size_decrease(tp1);
2985 sctp_total_flight_decrease(stcb, tp1);
2987 tp1->whoTo->net_ack += tp1->send_size;
2988 if (tp1->snd_count < 2) {
2990 * True non-retransmited chunk
2992 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample: only valid for never-retransmitted chunks (Karn's rule,
 * presumably enforced by the lines elided from this excerpt). */
2999 sctp_calculate_rto(stcb,
3002 &tp1->sent_rcv_time,
3003 sctp_align_safe_nocopy);
3008 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3009 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3010 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3011 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3012 stcb->asoc.this_sack_highest_gap,
3014 stcb->asoc.this_sack_highest_gap =
3015 tp1->rec.data.TSN_seq;
3017 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3018 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3019 #ifdef SCTP_AUDITING_ENABLED
3020 sctp_audit_log(0xB2,
3021 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3026 * All chunks NOT UNSENT fall through here and are marked
3027 * (leave PR-SCTP ones that are to skip alone though)
3029 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3030 tp1->sent = SCTP_DATAGRAM_MARKED;
3032 if (tp1->rec.data.chunk_was_revoked) {
3033 /* deflate the cwnd */
3034 tp1->whoTo->cwnd -= tp1->book_size;
3035 tp1->rec.data.chunk_was_revoked = 0;
3037 /* NR Sack code here */
3044 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3045 sctp_m_freem(tp1->data);
3052 } /* if (tp1->TSN_seq == theTSN) */
3053 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3057 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* Fragments may arrive out of order: wrap the scan around the queue once. */
3058 if ((tp1 == NULL) && (circled == 0)) {
3060 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3062 } /* end while (tp1) */
3065 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3067 /* In case the fragments were not in order we must reset */
3068 } /* end for (j = fragStart */
3070 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() -- iterate over the gap-ack (and nr-gap-ack)
 * blocks in a SACK/NR-SACK chunk, validating each block and delegating
 * per-block processing to sctp_process_segment_range(). Returns
 * chunk_freed (nonzero when nr-sack processing released data chunks).
 * NOTE(review): excerpt with embedded original line numbers and missing
 * intermediate lines; code left byte-identical.
 */
3075 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3076 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3077 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3078 int num_seg, int num_nr_seg, int *ecn_seg_sums)
3080 struct sctp_gap_ack_block *frag, block;
3081 struct sctp_tmit_chunk *tp1;
3086 uint16_t frag_strt, frag_end, prev_frag_end;
3088 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Regular gap-ack blocks come first, then num_nr_seg nr-gap-ack blocks. */
3092 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3095 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3097 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3098 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3099 *offset += sizeof(block);
3101 return (chunk_freed);
3103 frag_strt = ntohs(frag->start);
3104 frag_end = ntohs(frag->end);
3106 if (frag_strt > frag_end) {
3107 /* This gap report is malformed, skip it. */
3110 if (frag_strt <= prev_frag_end) {
3111 /* This gap report is not in order, so restart. */
3112 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3114 if (compare_with_wrap((last_tsn + frag_end), *biggest_tsn_acked, MAX_TSN)) {
3115 *biggest_tsn_acked = last_tsn + frag_end;
3122 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3123 non_revocable, &num_frs, biggest_newly_acked_tsn,
3124 this_sack_lowest_newack, ecn_seg_sums)) {
3127 prev_frag_end = frag_end;
3129 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3131 sctp_log_fr(*biggest_tsn_acked,
3132 *biggest_newly_acked_tsn,
3133 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3135 return (chunk_freed);
/*
 * sctp_check_for_revoked() -- after SACK processing, scan the sent
 * queue for chunks above cumack that were previously gap-acked (ACKED)
 * but are no longer covered by this SACK: those are "revoked" and go
 * back to SENT state with flight-size/cwnd re-inflated. Chunks still
 * covered (MARKED) are promoted back to ACKED. Any revocations also
 * reset the ECN-nonce resynchronization point.
 * NOTE(review): excerpt with embedded original line numbers and missing
 * intermediate lines; code left byte-identical.
 */
3139 sctp_check_for_revoked(struct sctp_tcb *stcb,
3140 struct sctp_association *asoc, uint32_t cumack,
3141 uint32_t biggest_tsn_acked)
3143 struct sctp_tmit_chunk *tp1;
3144 int tot_revoked = 0;
3146 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3148 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3151 * ok this guy is either ACK or MARKED. If it is
3152 * ACKED it has been previously acked but not this
3153 * time i.e. revoked. If it is MARKED it was ACK'ed
3156 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
/* Beyond the highest TSN this SACK reported: nothing more to check. */
3161 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3162 /* it has been revoked */
3163 tp1->sent = SCTP_DATAGRAM_SENT;
3164 tp1->rec.data.chunk_was_revoked = 1;
3166 * We must add this stuff back in to assure
3167 * timers and such get started.
3169 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3170 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3171 tp1->whoTo->flight_size,
3173 (uintptr_t) tp1->whoTo,
3174 tp1->rec.data.TSN_seq);
3176 sctp_flight_size_increase(tp1);
3177 sctp_total_flight_increase(stcb, tp1);
3179 * We inflate the cwnd to compensate for our
3180 * artificial inflation of the flight_size.
3182 tp1->whoTo->cwnd += tp1->book_size;
3184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3185 sctp_log_sack(asoc->last_acked_seq,
3187 tp1->rec.data.TSN_seq,
3190 SCTP_LOG_TSN_REVOKED);
3192 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3193 /* it has been re-acked in this SACK */
3194 tp1->sent = SCTP_DATAGRAM_ACKED;
3197 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3199 tp1 = TAILQ_NEXT(tp1, sctp_next);
3201 if (tot_revoked > 0) {
3203 * Setup the ecn nonce re-sync point. We do this since once
3204 * data is revoked we begin to retransmit things, which do
3205 * NOT have the ECN bits set. This means we are now out of
3206 * sync and must wait until we get back in sync with the
3207 * peer to check ECN bits.
3209 tp1 = TAILQ_FIRST(&asoc->send_queue)
3211 asoc->nonce_resync_tsn = asoc->sending_seq;
3213 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3215 asoc->nonce_wait_for_ecne = 0;
3216 asoc->nonce_sum_check = 0;
3222 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3223 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3225 struct sctp_tmit_chunk *tp1;
3226 int strike_flag = 0;
3228 int tot_retrans = 0;
3229 uint32_t sending_seq;
3230 struct sctp_nets *net;
3231 int num_dests_sacked = 0;
3234 * select the sending_seq, this is either the next thing ready to be
3235 * sent but not transmitted, OR, the next seq we assign.
3237 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3239 sending_seq = asoc->sending_seq;
3241 sending_seq = tp1->rec.data.TSN_seq;
3244 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3245 if ((asoc->sctp_cmt_on_off == 1) &&
3246 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3247 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3248 if (net->saw_newack)
3252 if (stcb->asoc.peer_supports_prsctp) {
3253 (void)SCTP_GETTIME_TIMEVAL(&now);
3255 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3258 if (tp1->no_fr_allowed) {
3259 /* this one had a timeout or something */
3260 tp1 = TAILQ_NEXT(tp1, sctp_next);
3263 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3264 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3265 sctp_log_fr(biggest_tsn_newly_acked,
3266 tp1->rec.data.TSN_seq,
3268 SCTP_FR_LOG_CHECK_STRIKE);
3270 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3272 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3276 if (stcb->asoc.peer_supports_prsctp) {
3277 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3278 /* Is it expired? */
3279 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3280 /* Yes so drop it */
3281 if (tp1->data != NULL) {
3282 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3283 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3284 SCTP_SO_NOT_LOCKED);
3286 tp1 = TAILQ_NEXT(tp1, sctp_next);
3291 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3292 asoc->this_sack_highest_gap, MAX_TSN)) {
3293 /* we are beyond the tsn in the sack */
3296 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3297 /* either a RESEND, ACKED, or MARKED */
3299 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3300 /* Continue strikin FWD-TSN chunks */
3301 tp1->rec.data.fwd_tsn_cnt++;
3303 tp1 = TAILQ_NEXT(tp1, sctp_next);
3307 * CMT : SFR algo (covers part of DAC and HTNA as well)
3309 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3311 * No new acks were receieved for data sent to this
3312 * dest. Therefore, according to the SFR algo for
3313 * CMT, no data sent to this dest can be marked for
3314 * FR using this SACK.
3316 tp1 = TAILQ_NEXT(tp1, sctp_next);
3318 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3319 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3321 * CMT: New acks were receieved for data sent to
3322 * this dest. But no new acks were seen for data
3323 * sent after tp1. Therefore, according to the SFR
3324 * algo for CMT, tp1 cannot be marked for FR using
3325 * this SACK. This step covers part of the DAC algo
3326 * and the HTNA algo as well.
3328 tp1 = TAILQ_NEXT(tp1, sctp_next);
3332 * Here we check to see if we were have already done a FR
3333 * and if so we see if the biggest TSN we saw in the sack is
3334 * smaller than the recovery point. If so we don't strike
3335 * the tsn... otherwise we CAN strike the TSN.
3338 * @@@ JRI: Check for CMT if (accum_moved &&
3339 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3342 if (accum_moved && asoc->fast_retran_loss_recovery) {
3344 * Strike the TSN if in fast-recovery and cum-ack
3347 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3348 sctp_log_fr(biggest_tsn_newly_acked,
3349 tp1->rec.data.TSN_seq,
3351 SCTP_FR_LOG_STRIKE_CHUNK);
3353 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3356 if ((asoc->sctp_cmt_on_off == 1) &&
3357 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3359 * CMT DAC algorithm: If SACK flag is set to
3360 * 0, then lowest_newack test will not pass
3361 * because it would have been set to the
3362 * cumack earlier. If not already to be
3363 * rtx'd, If not a mixed sack and if tp1 is
3364 * not between two sacked TSNs, then mark by
3365 * one more. NOTE that we are marking by one
3366 * additional time since the SACK DAC flag
3367 * indicates that two packets have been
3368 * received after this missing TSN.
3370 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3371 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3372 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3373 sctp_log_fr(16 + num_dests_sacked,
3374 tp1->rec.data.TSN_seq,
3376 SCTP_FR_LOG_STRIKE_CHUNK);
3381 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3382 (asoc->sctp_cmt_on_off == 0)) {
3384 * For those that have done a FR we must take
3385 * special consideration if we strike. I.e the
3386 * biggest_newly_acked must be higher than the
3387 * sending_seq at the time we did the FR.
3390 #ifdef SCTP_FR_TO_ALTERNATE
3392 * If FR's go to new networks, then we must only do
3393 * this for singly homed asoc's. However if the FR's
3394 * go to the same network (Armando's work) then its
3395 * ok to FR multiple times.
3403 if ((compare_with_wrap(biggest_tsn_newly_acked,
3404 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3405 (biggest_tsn_newly_acked ==
3406 tp1->rec.data.fast_retran_tsn)) {
3408 * Strike the TSN, since this ack is
3409 * beyond where things were when we
3412 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3413 sctp_log_fr(biggest_tsn_newly_acked,
3414 tp1->rec.data.TSN_seq,
3416 SCTP_FR_LOG_STRIKE_CHUNK);
3418 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3422 if ((asoc->sctp_cmt_on_off == 1) &&
3423 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3425 * CMT DAC algorithm: If
3426 * SACK flag is set to 0,
3427 * then lowest_newack test
3428 * will not pass because it
3429 * would have been set to
3430 * the cumack earlier. If
3431 * not already to be rtx'd,
3432 * If not a mixed sack and
3433 * if tp1 is not between two
3434 * sacked TSNs, then mark by
3435 * one more. NOTE that we
3436 * are marking by one
3437 * additional time since the
3438 * SACK DAC flag indicates
3439 * that two packets have
3440 * been received after this
3443 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3444 (num_dests_sacked == 1) &&
3445 compare_with_wrap(this_sack_lowest_newack,
3446 tp1->rec.data.TSN_seq, MAX_TSN)) {
3447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3448 sctp_log_fr(32 + num_dests_sacked,
3449 tp1->rec.data.TSN_seq,
3451 SCTP_FR_LOG_STRIKE_CHUNK);
3453 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3461 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3464 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3465 biggest_tsn_newly_acked, MAX_TSN)) {
3467 * We don't strike these: This is the HTNA
3468 * algorithm i.e. we don't strike If our TSN is
3469 * larger than the Highest TSN Newly Acked.
3473 /* Strike the TSN */
3474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3475 sctp_log_fr(biggest_tsn_newly_acked,
3476 tp1->rec.data.TSN_seq,
3478 SCTP_FR_LOG_STRIKE_CHUNK);
3480 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3483 if ((asoc->sctp_cmt_on_off == 1) &&
3484 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3486 * CMT DAC algorithm: If SACK flag is set to
3487 * 0, then lowest_newack test will not pass
3488 * because it would have been set to the
3489 * cumack earlier. If not already to be
3490 * rtx'd, If not a mixed sack and if tp1 is
3491 * not between two sacked TSNs, then mark by
3492 * one more. NOTE that we are marking by one
3493 * additional time since the SACK DAC flag
3494 * indicates that two packets have been
3495 * received after this missing TSN.
3497 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3498 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3499 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3500 sctp_log_fr(48 + num_dests_sacked,
3501 tp1->rec.data.TSN_seq,
3503 SCTP_FR_LOG_STRIKE_CHUNK);
3509 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3510 struct sctp_nets *alt;
3512 /* fix counts and things */
3513 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3514 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3515 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3517 (uintptr_t) tp1->whoTo,
3518 tp1->rec.data.TSN_seq);
3521 tp1->whoTo->net_ack++;
3522 sctp_flight_size_decrease(tp1);
3524 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3525 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3526 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3528 /* add back to the rwnd */
3529 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3531 /* remove from the total flight */
3532 sctp_total_flight_decrease(stcb, tp1);
3534 if ((stcb->asoc.peer_supports_prsctp) &&
3535 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3537 * Has it been retransmitted tv_sec times? -
3538 * we store the retran count there.
3540 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3541 /* Yes, so drop it */
3542 if (tp1->data != NULL) {
3543 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3544 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3545 SCTP_SO_NOT_LOCKED);
3547 /* Make sure to flag we had a FR */
3548 tp1->whoTo->net_ack++;
3549 tp1 = TAILQ_NEXT(tp1, sctp_next);
3553 /* printf("OK, we are now ready to FR this guy\n"); */
3554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3555 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3559 /* This is a subsequent FR */
3560 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3562 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3563 if (asoc->sctp_cmt_on_off == 1) {
3565 * CMT: Using RTX_SSTHRESH policy for CMT.
3566 * If CMT is being used, then pick dest with
3567 * largest ssthresh for any retransmission.
3569 tp1->no_fr_allowed = 1;
3571 /* sa_ignore NO_NULL_CHK */
3572 if (asoc->sctp_cmt_pf > 0) {
3574 * JRS 5/18/07 - If CMT PF is on,
3575 * use the PF version of
3578 alt = sctp_find_alternate_net(stcb, alt, 2);
3581 * JRS 5/18/07 - If only CMT is on,
3582 * use the CMT version of
3585 /* sa_ignore NO_NULL_CHK */
3586 alt = sctp_find_alternate_net(stcb, alt, 1);
3592 * CUCv2: If a different dest is picked for
3593 * the retransmission, then new
3594 * (rtx-)pseudo_cumack needs to be tracked
3595 * for orig dest. Let CUCv2 track new (rtx-)
3596 * pseudo-cumack always.
3599 tp1->whoTo->find_pseudo_cumack = 1;
3600 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3602 } else {/* CMT is OFF */
3604 #ifdef SCTP_FR_TO_ALTERNATE
3605 /* Can we find an alternate? */
3606 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3609 * default behavior is to NOT retransmit
3610 * FR's to an alternate. Armando Caro's
3611 * paper details why.
3617 tp1->rec.data.doing_fast_retransmit = 1;
3619 /* mark the sending seq for possible subsequent FR's */
3621 * printf("Marking TSN for FR new value %x\n",
3622 * (uint32_t)tpi->rec.data.TSN_seq);
3624 if (TAILQ_EMPTY(&asoc->send_queue)) {
3626 * If the queue of send is empty then its
3627 * the next sequence number that will be
3628 * assigned so we subtract one from this to
3629 * get the one we last sent.
3631 tp1->rec.data.fast_retran_tsn = sending_seq;
3634 * If there are chunks on the send queue
3635 * (unsent data that has made it from the
3636 * stream queues but not out the door, we
3637 * take the first one (which will have the
3638 * lowest TSN) and subtract one to get the
3641 struct sctp_tmit_chunk *ttt;
3643 ttt = TAILQ_FIRST(&asoc->send_queue);
3644 tp1->rec.data.fast_retran_tsn =
3645 ttt->rec.data.TSN_seq;
3650 * this guy had a RTO calculation pending on
3655 if (alt != tp1->whoTo) {
3656 /* yes, there is an alternate. */
3657 sctp_free_remote_addr(tp1->whoTo);
3658 /* sa_ignore FREED_MEMORY */
3660 atomic_add_int(&alt->ref_count, 1);
3663 tp1 = TAILQ_NEXT(tp1, sctp_next);
3666 if (tot_retrans > 0) {
3668 * Setup the ecn nonce re-sync point. We do this since once
3669 * we go to FR something we introduce a Karn's rule scenario
3670 * and won't know the totals for the ECN bits.
3672 asoc->nonce_resync_tsn = sending_seq;
3673 asoc->nonce_wait_for_ecne = 0;
3674 asoc->nonce_sum_check = 0;
/*
 * sctp_try_advance_peer_ack_point() — walk the front of asoc->sent_queue and
 * try to move asoc->advanced_peer_ack_point forward over chunks marked
 * SCTP_FORWARD_TSN_SKIP (abandoned PR-SCTP data), returning the last chunk
 * that advanced the point (a_adv, NULL if none).  The caller uses the result
 * to decide whether a FORWARD-TSN must be sent.
 *
 * NOTE(review): this extract is lossy — the embedded original line numbers
 * are non-contiguous, so braces, returns and parts of the loop are missing
 * from this view.  Comments below describe only what the visible lines show.
 */
3678 struct sctp_tmit_chunk *
3679 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3680 struct sctp_association *asoc)
3682 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Without PR-SCTP support from the peer there is nothing to advance. */
3686 if (asoc->peer_supports_prsctp == 0) {
3689 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only SKIP-marked or RESEND-marked chunks can possibly be stepped over. */
3691 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3692 tp1->sent != SCTP_DATAGRAM_RESEND) {
3693 /* no chance to advance, out of here */
3696 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3697 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3698 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3699 asoc->advanced_peer_ack_point,
3700 tp1->rec.data.TSN_seq, 0, 0);
/* Reliable (non-PR) chunks form a hard wall for the ack point. */
3703 if (!PR_SCTP_ENABLED(tp1->flags)) {
3705 * We can't fwd-tsn past any that are reliable aka
3706 * retransmitted until the asoc fails.
/* Current time is needed below for the TTL-expiry comparison. */
3711 (void)SCTP_GETTIME_TIMEVAL(&now);
/* Remember the successor before tp1 is possibly released. */
3714 tp2 = TAILQ_NEXT(tp1, sctp_next);
3716 * now we got a chunk which is marked for another
3717 * retransmission to a PR-stream but has run out its chances
3718 * already maybe OR has been marked to skip now. Can we skip
3719 * it if its a resend?
3721 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3722 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3724 * Now is this one marked for resend and its time is
/* TTL policy: expired RESEND chunks are abandoned and released. */
3727 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3728 /* Yes so drop it */
3730 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3731 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3732 SCTP_SO_NOT_LOCKED);
3736 * No, we are done when hit one for resend
3737 * whos time as not expired.
3743 * Ok now if this chunk is marked to drop it we can clean up
3744 * the chunk, advance our peer ack point and we can check
/* SKIP chunk: move advanced_peer_ack_point up to this TSN (wrap-safe). */
3747 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3748 /* advance PeerAckPoint goes forward */
3749 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3750 asoc->advanced_peer_ack_point,
3753 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3755 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3756 /* No update but we do save the chk */
3761 * If it is still in RESEND we can advance no
3767 * If we hit here we just dumped tp1, move to next tsn on
/*
 * sctp_fs_audit() — debugging audit of the flight-size bookkeeping: walks
 * asoc->sent_queue classifying every chunk by its ->sent state and compares
 * the tallies against the association's cached totals (entry_flight /
 * entry_cnt).  On mismatch it prints diagnostics (and, in at least one
 * visible branch, panics).  Callers use the return value to decide whether
 * to rebuild the flight counters (see the recovery code in the SACK paths).
 *
 * NOTE(review): lossy extract — the return statements, the `ret`/counter
 * increments, and some braces are elided here; only the classification
 * skeleton is visible.
 */
3776 sctp_fs_audit(struct sctp_association *asoc)
3778 struct sctp_tmit_chunk *chk;
/* Per-state tallies accumulated over the sent_queue walk. */
3779 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3780 int entry_flight, entry_cnt, ret;
/* Snapshot the association totals before counting, for later comparison. */
3782 entry_flight = asoc->total_flight;
3783 entry_cnt = asoc->total_flight_count;
/* If everything queued is PR-SCTP the audit is skipped (early out). */
3786 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3789 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
/* sent < RESEND means the chunk is still counted as in flight. */
3790 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3791 printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3792 chk->rec.data.TSN_seq,
3797 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3799 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3801 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Any residual inflight/inbetween chunks here indicate corrupt counters. */
3808 if ((inflight > 0) || (inbetween > 0)) {
3810 panic("Flight size-express incorrect? \n");
3812 printf("asoc->total_flight:%d cnt:%d\n",
3813 entry_flight, entry_cnt);
3815 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3816 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery() — after the peer's rwnd reopens, take the
 * chunk that was sent as a zero-window probe (tp1) and requeue it for normal
 * (re)transmission: clear its window_probe flag, remove it from the flight
 * accounting, mark it SCTP_DATAGRAM_RESEND and bump the retran count.
 * Chunks already ACKED (or with freed data) are logged and left alone.
 *
 * NOTE(review): lossy extract — the `net` parameter's use and the early
 * return after the skip-log are elided from this view.
 */
3825 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3826 struct sctp_association *asoc,
3827 struct sctp_nets *net,
3828 struct sctp_tmit_chunk *tp1)
3830 tp1->window_probe = 0;
/* ACKED-or-beyond, or data already freed: do not move it back to resend. */
3831 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3832 /* TSN's skipped we do NOT move back. */
3833 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3834 tp1->whoTo->flight_size,
3836 (uintptr_t) tp1->whoTo,
3837 tp1->rec.data.TSN_seq);
/* Order matters: shrink flight while tp1->sent still reads "in flight". */
3840 /* First setup this by shrinking flight */
3841 sctp_flight_size_decrease(tp1);
3842 sctp_total_flight_decrease(stcb, tp1);
3843 /* Now mark for resend */
3844 tp1->sent = SCTP_DATAGRAM_RESEND;
3845 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3848 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3849 tp1->whoTo->flight_size,
3851 (uintptr_t) tp1->whoTo,
3852 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack() — fast-path handler for a SACK that carries a
 * cumulative ack only (no gap-ack blocks).  Under the TCB lock it:
 *   1) discards stale/duplicate cumacks (with an rwnd-update special case),
 *   2) (strict-sacks) aborts the association on an impossible cumack,
 *   3) dequeues and frees every sent_queue chunk covered by the cumack,
 *      updating flight size, RTO, CMT CUCv2 flags and ECN-nonce sums,
 *   4) wakes the sending socket, runs the CC module's after-sack hook,
 *   5) recomputes the peer rwnd, manages per-net T3-rxt / early-FR timers
 *      and window-probe recovery,
 *   6) handles shutdown progression when the queues drain, and
 *   7) runs PR-SCTP advance-peer-ack-point / FORWARD-TSN procedures.
 *
 * NOTE(review): lossy extract — embedded original line numbers jump, so
 * many returns, braces, local declarations (e.g. old_rwnd, send_s, oper,
 * to_ticks, so) and some conditions are not visible here.  Comments state
 * only what the visible lines establish; elided behavior is flagged.
 */
3857 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3858 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3860 struct sctp_nets *net;
3861 struct sctp_association *asoc;
3862 struct sctp_tmit_chunk *tp1, *tp2;
3864 int win_probe_recovery = 0;
3865 int win_probe_recovered = 0;
3866 int j, done_once = 0;
3868 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3869 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3870 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
/* Caller must hold the TCB lock for all queue/counter mutation below. */
3872 SCTP_TCB_LOCK_ASSERT(stcb);
3873 #ifdef SCTP_ASOCLOG_OF_TSNS
3874 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3875 stcb->asoc.cumack_log_at++;
3876 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3877 stcb->asoc.cumack_log_at = 0;
/* Stale cumack (behind last_acked_seq) is dropped; equal means a pure
 * window-update SACK: refresh rwnd only (elided return paths in between). */
3881 old_rwnd = asoc->peers_rwnd;
3882 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3885 } else if (asoc->last_acked_seq == cumack) {
3886 /* Window update sack */
3887 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3888 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3889 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3890 /* SWS sender side engages */
3891 asoc->peers_rwnd = 0;
3893 if (asoc->peers_rwnd > old_rwnd) {
/* Snapshot cwnds and reset CMT/fast-recovery per-net state for this SACK. */
3898 /* First setup for CC stuff */
3899 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3900 net->prev_cwnd = net->cwnd;
3905 * CMT: Reset CUC and Fast recovery algo variables before
3908 net->new_pseudo_cumack = 0;
3909 net->will_exit_fast_recovery = 0;
/* strict_sacks: a cumack at/above our next sending_seq is a protocol
 * violation — build an operational-error cause and abort the assoc. */
3911 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3914 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3915 tp1 = TAILQ_LAST(&asoc->sent_queue,
3916 sctpchunk_listhead);
3917 send_s = tp1->rec.data.TSN_seq + 1;
3919 send_s = asoc->sending_seq;
3921 if ((cumack == send_s) ||
3922 compare_with_wrap(cumack, send_s, MAX_TSN)) {
3928 panic("Impossible sack 1");
3933 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3934 0, M_DONTWAIT, 1, MT_DATA);
3936 struct sctp_paramhdr *ph;
3939 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3941 ph = mtod(oper, struct sctp_paramhdr *);
3942 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3943 ph->param_length = htons(SCTP_BUF_LEN(oper));
3944 ippp = (uint32_t *) (ph + 1);
3945 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3947 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3948 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3953 asoc->this_sack_highest_gap = cumack;
3954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3955 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3956 stcb->asoc.overall_error_count,
3958 SCTP_FROM_SCTP_INDATA,
/* Any acceptable SACK clears the association error counter. */
3961 stcb->asoc.overall_error_count = 0;
/* Cumack moved forward: dequeue every covered chunk from sent_queue. */
3962 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3963 /* process the new consecutive TSN first */
3964 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3966 tp2 = TAILQ_NEXT(tp1, sctp_next);
3967 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3969 cumack == tp1->rec.data.TSN_seq) {
3970 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3971 printf("Warning, an unsent is now acked?\n");
3974 * ECN Nonce: Add the nonce to the sender's
3977 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3978 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3980 * If it is less than ACKED, it is
3981 * now no-longer in flight. Higher
3982 * values may occur during marking
3984 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3985 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3986 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3987 tp1->whoTo->flight_size,
3989 (uintptr_t) tp1->whoTo,
3990 tp1->rec.data.TSN_seq);
3992 sctp_flight_size_decrease(tp1);
3993 /* sa_ignore NO_NULL_CHK */
3994 sctp_total_flight_decrease(stcb, tp1);
3996 tp1->whoTo->net_ack += tp1->send_size;
/* snd_count < 2 means never retransmitted: unambiguous for RTO sampling. */
3997 if (tp1->snd_count < 2) {
3999 * True non-retransmited
4002 tp1->whoTo->net_ack2 +=
4005 /* update RTO too? */
4012 sctp_calculate_rto(stcb,
4014 &tp1->sent_rcv_time,
4015 sctp_align_safe_nocopy);
4020 * CMT: CUCv2 algorithm. From the
4021 * cumack'd TSNs, for each TSN being
4022 * acked for the first time, set the
4023 * following variables for the
4024 * corresp destination.
4025 * new_pseudo_cumack will trigger a
4027 * find_(rtx_)pseudo_cumack will
4028 * trigger search for the next
4029 * expected (rtx-)pseudo-cumack.
4031 tp1->whoTo->new_pseudo_cumack = 1;
4032 tp1->whoTo->find_pseudo_cumack = 1;
4033 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4036 /* sa_ignore NO_NULL_CHK */
4037 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4040 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4041 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4043 if (tp1->rec.data.chunk_was_revoked) {
4044 /* deflate the cwnd */
4045 tp1->whoTo->cwnd -= tp1->book_size;
4046 tp1->rec.data.chunk_was_revoked = 0;
/* Chunk fully covered by cumack: mark ACKED, unlink and free it. */
4048 tp1->sent = SCTP_DATAGRAM_ACKED;
4049 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4051 /* sa_ignore NO_NULL_CHK */
4052 sctp_free_bufspace(stcb, asoc, tp1, 1);
4053 sctp_m_freem(tp1->data);
4055 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4056 sctp_log_sack(asoc->last_acked_seq,
4058 tp1->rec.data.TSN_seq,
4061 SCTP_LOG_FREE_SENT);
4064 asoc->sent_queue_cnt--;
4065 sctp_free_a_chunk(stcb, tp1);
/* Send-buffer space was freed: wake the writer.  On Apple (and in lock
 * testing) the socket lock must be taken with the TCB lock dropped, so the
 * assoc is refcounted across the unlock/relock window and re-checked. */
4073 /* sa_ignore NO_NULL_CHK */
4074 if (stcb->sctp_socket) {
4075 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4079 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4081 /* sa_ignore NO_NULL_CHK */
4082 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4084 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4085 so = SCTP_INP_SO(stcb->sctp_ep);
4086 atomic_add_int(&stcb->asoc.refcnt, 1);
4087 SCTP_TCB_UNLOCK(stcb);
4088 SCTP_SOCKET_LOCK(so, 1);
4089 SCTP_TCB_LOCK(stcb);
4090 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4091 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4092 /* assoc was freed while we were unlocked */
4093 SCTP_SOCKET_UNLOCK(so, 1);
4097 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4098 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4099 SCTP_SOCKET_UNLOCK(so, 1);
4102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4103 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
/* Run the pluggable CC hook only when the cumack actually advanced. */
4107 /* JRS - Use the congestion control given in the CC module */
4108 if (asoc->last_acked_seq != cumack)
4109 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4111 asoc->last_acked_seq = cumack;
4113 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4114 /* nothing left in-flight */
4115 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4116 net->flight_size = 0;
4117 net->partial_bytes_acked = 0;
4119 asoc->total_flight = 0;
4120 asoc->total_flight_count = 0;
/* ECN nonce bookkeeping: detect a peer lying about nonce sums, and re-arm
 * the sum check once past the resync TSN (see the truth-table note below). */
4122 /* ECN Nonce updates */
4123 if (asoc->ecn_nonce_allowed) {
4124 if (asoc->nonce_sum_check) {
4125 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4126 if (asoc->nonce_wait_for_ecne == 0) {
4127 struct sctp_tmit_chunk *lchk;
4129 lchk = TAILQ_FIRST(&asoc->send_queue);
4130 asoc->nonce_wait_for_ecne = 1;
4132 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4134 asoc->nonce_wait_tsn = asoc->sending_seq;
4137 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4138 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4140 * Misbehaving peer. We need
4141 * to react to this guy
4143 asoc->ecn_allowed = 0;
4144 asoc->ecn_nonce_allowed = 0;
4149 /* See if Resynchronization Possible */
4150 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4151 asoc->nonce_sum_check = 1;
4153 * Now we must calculate what the base is.
4154 * We do this based on two things, we know
4155 * the total's for all the segments
4156 * gap-acked in the SACK (none). We also
4157 * know the SACK's nonce sum, its in
4158 * nonce_sum_flag. So we can build a truth
4159 * table to back-calculate the new value of
4160 * asoc->nonce_sum_expect_base:
4162 * SACK-flag-Value Seg-Sums Base 0 0 0
4166 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute peer rwnd from the advertised window minus outstanding data
 * (plus per-chunk overhead); a growing rwnd enables window-probe recovery. */
4171 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4172 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4173 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4174 /* SWS sender side engages */
4175 asoc->peers_rwnd = 0;
4177 if (asoc->peers_rwnd > old_rwnd) {
4178 win_probe_recovery = 1;
/* Per-destination pass: window-probe recovery plus T3-rxt / early-FR timer
 * start-or-stop depending on remaining flight on that net. */
4180 /* Now assure a timer where data is queued at */
4183 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4186 if (win_probe_recovery && (net->window_probe)) {
4187 win_probe_recovered = 1;
4189 * Find first chunk that was used with window probe
4190 * and clear the sent
4192 /* sa_ignore FREED_MEMORY */
4193 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4194 if (tp1->window_probe) {
4195 /* move back to data send queue */
4196 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4201 if (net->RTO == 0) {
4202 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4204 to_ticks = MSEC_TO_TICKS(net->RTO);
4206 if (net->flight_size) {
4208 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4209 sctp_timeout_handler, &net->rxt_timer);
4210 if (net->window_probe) {
4211 net->window_probe = 0;
4214 if (net->window_probe) {
4216 * In window probes we must assure a timer
4217 * is still running there
4219 net->window_probe = 0;
4220 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4221 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4222 sctp_timeout_handler, &net->rxt_timer);
4224 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4225 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4227 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4229 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4230 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4231 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4232 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4233 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Inconsistent flight state (condition head elided): audit and, if the
 * audit reports trouble, rebuild flight/retran counters from sent_queue. */
4239 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4240 (asoc->sent_queue_retran_cnt == 0) &&
4241 (win_probe_recovered == 0) &&
4244 * huh, this should not happen unless all packets are
4245 * PR-SCTP and marked to skip of course.
4247 if (sctp_fs_audit(asoc)) {
4248 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4249 net->flight_size = 0;
4251 asoc->total_flight = 0;
4252 asoc->total_flight_count = 0;
4253 asoc->sent_queue_retran_cnt = 0;
4254 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4255 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4256 sctp_flight_size_increase(tp1);
4257 sctp_total_flight_increase(stcb, tp1);
4258 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4259 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4266 /**********************************/
4267 /* Now what about shutdown issues */
4268 /**********************************/
4269 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4270 /* nothing left on sendqueue.. consider done */
/* Shutdown-pending with a writer locked on a stream: an empty partial
 * message means the user never signalled EOR — flag it for abort below. */
4272 if ((asoc->stream_queue_cnt == 1) &&
4273 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4274 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4275 (asoc->locked_on_sending)
4277 struct sctp_stream_queue_pending *sp;
4280 * I may be in a state where we got all across.. but
4281 * cannot write more due to a shutdown... we abort
4282 * since the user did not indicate EOR in this case.
4283 * The sp will be cleaned during free of the asoc.
4285 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4287 if ((sp) && (sp->length == 0)) {
4288 /* Let cleanup code purge it */
4289 if (sp->msg_is_complete) {
4290 asoc->stream_queue_cnt--;
4292 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4293 asoc->locked_on_sending = NULL;
4294 asoc->stream_queue_cnt--;
/* All data delivered while SHUTDOWN_PENDING: either abort (partial msg
 * left) with a USER_INITIATED_ABT cause, or send SHUTDOWN and arm timers. */
4298 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4299 (asoc->stream_queue_cnt == 0)) {
4300 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4301 /* Need to abort here */
4307 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4308 0, M_DONTWAIT, 1, MT_DATA);
4310 struct sctp_paramhdr *ph;
4313 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4315 ph = mtod(oper, struct sctp_paramhdr *);
4316 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4317 ph->param_length = htons(SCTP_BUF_LEN(oper));
4318 ippp = (uint32_t *) (ph + 1);
4319 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4321 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4322 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4324 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4325 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4326 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4328 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4329 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4330 sctp_stop_timers_for_shutdown(stcb);
4331 sctp_send_shutdown(stcb,
4332 stcb->asoc.primary_destination);
4333 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4334 stcb->sctp_ep, stcb, asoc->primary_destination);
4335 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4336 stcb->sctp_ep, stcb, asoc->primary_destination);
/* Mirror case: SHUTDOWN_RECEIVED and drained — answer with SHUTDOWN-ACK. */
4338 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4339 (asoc->stream_queue_cnt == 0)) {
4340 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4343 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4344 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4345 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4346 sctp_send_shutdown_ack(stcb,
4347 stcb->asoc.primary_destination);
4348 sctp_stop_timers_for_shutdown(stcb);
4349 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4350 stcb->sctp_ep, stcb, asoc->primary_destination);
/* PR-SCTP (RFC 3758): advance the peer ack point past abandoned chunks and
 * emit FORWARD-TSN when it moved; re-send it when the FWD-TSN got lost. */
4353 /*********************************************/
4354 /* Here we perform PR-SCTP procedures */
4356 /*********************************************/
4357 /* C1. update advancedPeerAckPoint */
4358 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4359 asoc->advanced_peer_ack_point = cumack;
4361 /* PR-Sctp issues need to be addressed too */
4362 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4363 struct sctp_tmit_chunk *lchk;
4364 uint32_t old_adv_peer_ack_point;
4366 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4367 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4368 /* C3. See if we need to send a Fwd-TSN */
4369 if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4372 * ISSUE with ECN, see FWD-TSN processing for notes
4373 * on issues that will occur when the ECN NONCE
4374 * stuff is put into SCTP for cross checking.
4376 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4378 send_forward_tsn(stcb, asoc);
4380 * ECN Nonce: Disable Nonce Sum check when
4381 * FWD TSN is sent and store resync tsn
4383 asoc->nonce_sum_check = 0;
4384 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4386 /* try to FR fwd-tsn's that get lost too */
4387 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4388 send_forward_tsn(stcb, asoc);
4393 /* Assure a timer is up */
4394 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4395 stcb->sctp_ep, stcb, lchk->whoTo);
4398 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4399 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4401 stcb->asoc.peers_rwnd,
4402 stcb->asoc.total_flight,
4403 stcb->asoc.total_output_queue_size);
4408 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4409 struct sctp_tcb *stcb, struct sctp_nets *net_from,
4410 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4411 int *abort_now, uint8_t flags,
4412 uint32_t cum_ack, uint32_t rwnd)
4414 struct sctp_association *asoc;
4415 struct sctp_tmit_chunk *tp1, *tp2;
4416 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4417 uint32_t sav_cum_ack;
4418 uint16_t wake_him = 0;
4419 uint32_t send_s = 0;
4421 int accum_moved = 0;
4422 int will_exit_fast_recovery = 0;
4423 uint32_t a_rwnd, old_rwnd;
4424 int win_probe_recovery = 0;
4425 int win_probe_recovered = 0;
4426 struct sctp_nets *net = NULL;
4427 int nonce_sum_flag, ecn_seg_sums = 0;
4429 uint8_t reneged_all = 0;
4430 uint8_t cmt_dac_flag;
4433 * we take any chance we can to service our queues since we cannot
4434 * get awoken when the socket is read from :<
4437 * Now perform the actual SACK handling: 1) Verify that it is not an
4438 * old sack, if so discard. 2) If there is nothing left in the send
4439 * queue (cum-ack is equal to last acked) then you have a duplicate
4440 * too, update any rwnd change and verify no timers are running.
4441 * then return. 3) Process any new consequtive data i.e. cum-ack
4442 * moved process these first and note that it moved. 4) Process any
4443 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4444 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4445 * sync up flightsizes and things, stop all timers and also check
4446 * for shutdown_pending state. If so then go ahead and send off the
4447 * shutdown. If in shutdown recv, send off the shutdown-ack and
4448 * start that timer, Ret. 9) Strike any non-acked things and do FR
4449 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4450 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4451 * if in shutdown_recv state.
4453 SCTP_TCB_LOCK_ASSERT(stcb);
4455 this_sack_lowest_newack = 0;
4457 SCTP_STAT_INCR(sctps_slowpath_sack);
4459 nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4460 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4461 #ifdef SCTP_ASOCLOG_OF_TSNS
4462 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4463 stcb->asoc.cumack_log_at++;
4464 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4465 stcb->asoc.cumack_log_at = 0;
4470 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4471 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4472 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4474 old_rwnd = stcb->asoc.peers_rwnd;
4475 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4476 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4477 stcb->asoc.overall_error_count,
4479 SCTP_FROM_SCTP_INDATA,
4482 stcb->asoc.overall_error_count = 0;
4484 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4485 sctp_log_sack(asoc->last_acked_seq,
4492 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4494 uint32_t *dupdata, dblock;
4496 for (i = 0; i < num_dup; i++) {
4497 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4498 sizeof(uint32_t), (uint8_t *) & dblock);
4499 if (dupdata == NULL) {
4502 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4505 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4507 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4508 tp1 = TAILQ_LAST(&asoc->sent_queue,
4509 sctpchunk_listhead);
4510 send_s = tp1->rec.data.TSN_seq + 1;
4513 send_s = asoc->sending_seq;
4515 if (cum_ack == send_s ||
4516 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4520 * no way, we have not even sent this TSN out yet.
4521 * Peer is hopelessly messed up with us.
4523 printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4526 printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4527 tp1->rec.data.TSN_seq, tp1);
4532 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4533 0, M_DONTWAIT, 1, MT_DATA);
4535 struct sctp_paramhdr *ph;
4538 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4540 ph = mtod(oper, struct sctp_paramhdr *);
4541 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4542 ph->param_length = htons(SCTP_BUF_LEN(oper));
4543 ippp = (uint32_t *) (ph + 1);
4544 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4546 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4547 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4551 /**********************/
4552 /* 1) check the range */
4553 /**********************/
4554 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4555 /* acking something behind */
4558 sav_cum_ack = asoc->last_acked_seq;
4560 /* update the Rwnd of the peer */
4561 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4562 TAILQ_EMPTY(&asoc->send_queue) &&
4563 (asoc->stream_queue_cnt == 0)) {
4564 /* nothing left on send/sent and strmq */
4565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4566 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4567 asoc->peers_rwnd, 0, 0, a_rwnd);
4569 asoc->peers_rwnd = a_rwnd;
4570 if (asoc->sent_queue_retran_cnt) {
4571 asoc->sent_queue_retran_cnt = 0;
4573 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4574 /* SWS sender side engages */
4575 asoc->peers_rwnd = 0;
4577 /* stop any timers */
4578 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4579 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4580 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4581 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4582 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4583 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4584 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4585 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4588 net->partial_bytes_acked = 0;
4589 net->flight_size = 0;
4591 asoc->total_flight = 0;
4592 asoc->total_flight_count = 0;
4596 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4597 * things. The total byte count acked is tracked in netAckSz AND
4598 * netAck2 is used to track the total bytes acked that are un-
4599 * amibguious and were never retransmitted. We track these on a per
4600 * destination address basis.
4602 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4603 net->prev_cwnd = net->cwnd;
4608 * CMT: Reset CUC and Fast recovery algo variables before
4611 net->new_pseudo_cumack = 0;
4612 net->will_exit_fast_recovery = 0;
4614 /* process the new consecutive TSN first */
4615 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4617 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4619 last_tsn == tp1->rec.data.TSN_seq) {
4620 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4622 * ECN Nonce: Add the nonce to the sender's
4625 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4627 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4629 * If it is less than ACKED, it is
4630 * now no-longer in flight. Higher
4631 * values may occur during marking
4633 if ((tp1->whoTo->dest_state &
4634 SCTP_ADDR_UNCONFIRMED) &&
4635 (tp1->snd_count < 2)) {
4637 * If there was no retran
4638 * and the address is
4639 * un-confirmed and we sent
4641 * sacked.. its confirmed,
4644 tp1->whoTo->dest_state &=
4645 ~SCTP_ADDR_UNCONFIRMED;
4647 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4648 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4649 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4650 tp1->whoTo->flight_size,
4652 (uintptr_t) tp1->whoTo,
4653 tp1->rec.data.TSN_seq);
4655 sctp_flight_size_decrease(tp1);
4656 sctp_total_flight_decrease(stcb, tp1);
4658 tp1->whoTo->net_ack += tp1->send_size;
4660 /* CMT SFR and DAC algos */
4661 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4662 tp1->whoTo->saw_newack = 1;
4664 if (tp1->snd_count < 2) {
4666 * True non-retransmited
4669 tp1->whoTo->net_ack2 +=
4672 /* update RTO too? */
4675 sctp_calculate_rto(stcb,
4677 &tp1->sent_rcv_time,
4678 sctp_align_safe_nocopy);
4683 * CMT: CUCv2 algorithm. From the
4684 * cumack'd TSNs, for each TSN being
4685 * acked for the first time, set the
4686 * following variables for the
4687 * corresp destination.
4688 * new_pseudo_cumack will trigger a
4690 * find_(rtx_)pseudo_cumack will
4691 * trigger search for the next
4692 * expected (rtx-)pseudo-cumack.
4694 tp1->whoTo->new_pseudo_cumack = 1;
4695 tp1->whoTo->find_pseudo_cumack = 1;
4696 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4699 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4700 sctp_log_sack(asoc->last_acked_seq,
4702 tp1->rec.data.TSN_seq,
4705 SCTP_LOG_TSN_ACKED);
4707 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4708 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4711 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4712 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4713 #ifdef SCTP_AUDITING_ENABLED
4714 sctp_audit_log(0xB3,
4715 (asoc->sent_queue_retran_cnt & 0x000000ff));
4718 if (tp1->rec.data.chunk_was_revoked) {
4719 /* deflate the cwnd */
4720 tp1->whoTo->cwnd -= tp1->book_size;
4721 tp1->rec.data.chunk_was_revoked = 0;
4723 tp1->sent = SCTP_DATAGRAM_ACKED;
4728 tp1 = TAILQ_NEXT(tp1, sctp_next);
4730 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4731 /* always set this up to cum-ack */
4732 asoc->this_sack_highest_gap = last_tsn;
4734 if ((num_seg > 0) || (num_nr_seg > 0)) {
4737 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4738 * to be greater than the cumack. Also reset saw_newack to 0
4741 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4742 net->saw_newack = 0;
4743 net->this_sack_highest_newack = last_tsn;
4747 * thisSackHighestGap will increase while handling NEW
4748 * segments this_sack_highest_newack will increase while
4749 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4750 * used for CMT DAC algo. saw_newack will also change.
4752 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4753 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4754 num_seg, num_nr_seg, &ecn_seg_sums)) {
4757 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4759 * validate the biggest_tsn_acked in the gap acks if
4760 * strict adherence is wanted.
4762 if ((biggest_tsn_acked == send_s) ||
4763 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4765 * peer is either confused or we are under
4766 * attack. We must abort.
4768 printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4776 /*******************************************/
4777 /* cancel ALL T3-send timer if accum moved */
4778 /*******************************************/
4779 if (asoc->sctp_cmt_on_off == 1) {
4780 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4781 if (net->new_pseudo_cumack)
4782 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4784 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4789 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4790 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4791 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4795 /********************************************/
4796 /* drop the acked chunks from the sentqueue */
4797 /********************************************/
4798 asoc->last_acked_seq = cum_ack;
4800 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4804 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4808 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4809 /* no more sent on list */
4810 printf("Warning, tp1->sent == %d and its now acked?\n",
4813 tp2 = TAILQ_NEXT(tp1, sctp_next);
4814 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4815 if (tp1->pr_sctp_on) {
4816 if (asoc->pr_sctp_cnt != 0)
4817 asoc->pr_sctp_cnt--;
4819 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4820 (asoc->total_flight > 0)) {
4822 panic("Warning flight size is postive and should be 0");
4824 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4825 asoc->total_flight);
4827 asoc->total_flight = 0;
4830 /* sa_ignore NO_NULL_CHK */
4831 sctp_free_bufspace(stcb, asoc, tp1, 1);
4832 sctp_m_freem(tp1->data);
4833 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4834 asoc->sent_queue_cnt_removeable--;
4837 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4838 sctp_log_sack(asoc->last_acked_seq,
4840 tp1->rec.data.TSN_seq,
4843 SCTP_LOG_FREE_SENT);
4846 asoc->sent_queue_cnt--;
4847 sctp_free_a_chunk(stcb, tp1);
4850 } while (tp1 != NULL);
4853 /* sa_ignore NO_NULL_CHK */
4854 if ((wake_him) && (stcb->sctp_socket)) {
4855 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4859 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4861 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4863 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4864 so = SCTP_INP_SO(stcb->sctp_ep);
4865 atomic_add_int(&stcb->asoc.refcnt, 1);
4866 SCTP_TCB_UNLOCK(stcb);
4867 SCTP_SOCKET_LOCK(so, 1);
4868 SCTP_TCB_LOCK(stcb);
4869 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4870 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4871 /* assoc was freed while we were unlocked */
4872 SCTP_SOCKET_UNLOCK(so, 1);
4876 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4877 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4878 SCTP_SOCKET_UNLOCK(so, 1);
4881 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4882 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4886 if (asoc->fast_retran_loss_recovery && accum_moved) {
4887 if (compare_with_wrap(asoc->last_acked_seq,
4888 asoc->fast_recovery_tsn, MAX_TSN) ||
4889 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4890 /* Setup so we will exit RFC2582 fast recovery */
4891 will_exit_fast_recovery = 1;
4895 * Check for revoked fragments:
4897 * if Previous sack - Had no frags then we can't have any revoked if
4898 * Previous sack - Had frag's then - If we now have frags aka
4899 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4900 * some of them. else - The peer revoked all ACKED fragments, since
4901 * we had some before and now we have NONE.
4905 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4906 asoc->saw_sack_with_frags = 1;
4907 } else if (asoc->saw_sack_with_frags) {
4908 int cnt_revoked = 0;
4910 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4912 /* Peer revoked all dg's marked or acked */
4913 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4914 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4915 tp1->sent = SCTP_DATAGRAM_SENT;
4916 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4917 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4918 tp1->whoTo->flight_size,
4920 (uintptr_t) tp1->whoTo,
4921 tp1->rec.data.TSN_seq);
4923 sctp_flight_size_increase(tp1);
4924 sctp_total_flight_increase(stcb, tp1);
4925 tp1->rec.data.chunk_was_revoked = 1;
4927 * To ensure that this increase in
4928 * flightsize, which is artificial,
4929 * does not throttle the sender, we
4930 * also increase the cwnd
4933 tp1->whoTo->cwnd += tp1->book_size;
4941 asoc->saw_sack_with_frags = 0;
4944 asoc->saw_sack_with_nr_frags = 1;
4946 asoc->saw_sack_with_nr_frags = 0;
4948 /* JRS - Use the congestion control given in the CC module */
4949 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4951 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4952 /* nothing left in-flight */
4953 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4954 /* stop all timers */
4955 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4956 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4957 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4958 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4959 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4962 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4963 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4964 net->flight_size = 0;
4965 net->partial_bytes_acked = 0;
4967 asoc->total_flight = 0;
4968 asoc->total_flight_count = 0;
4970 /**********************************/
4971 /* Now what about shutdown issues */
4972 /**********************************/
4973 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4974 /* nothing left on sendqueue.. consider done */
4975 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4976 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4977 asoc->peers_rwnd, 0, 0, a_rwnd);
4979 asoc->peers_rwnd = a_rwnd;
4980 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4981 /* SWS sender side engages */
4982 asoc->peers_rwnd = 0;
4985 if ((asoc->stream_queue_cnt == 1) &&
4986 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4987 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4988 (asoc->locked_on_sending)
4990 struct sctp_stream_queue_pending *sp;
4993 * I may be in a state where we got all across.. but
4994 * cannot write more due to a shutdown... we abort
4995 * since the user did not indicate EOR in this case.
4997 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4999 if ((sp) && (sp->length == 0)) {
5000 asoc->locked_on_sending = NULL;
5001 if (sp->msg_is_complete) {
5002 asoc->stream_queue_cnt--;
5004 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5005 asoc->stream_queue_cnt--;
5009 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5010 (asoc->stream_queue_cnt == 0)) {
5011 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5012 /* Need to abort here */
5018 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5019 0, M_DONTWAIT, 1, MT_DATA);
5021 struct sctp_paramhdr *ph;
5024 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5026 ph = mtod(oper, struct sctp_paramhdr *);
5027 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5028 ph->param_length = htons(SCTP_BUF_LEN(oper));
5029 ippp = (uint32_t *) (ph + 1);
5030 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5032 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5033 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5036 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5037 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5038 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5040 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5041 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5042 sctp_stop_timers_for_shutdown(stcb);
5043 sctp_send_shutdown(stcb,
5044 stcb->asoc.primary_destination);
5045 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5046 stcb->sctp_ep, stcb, asoc->primary_destination);
5047 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5048 stcb->sctp_ep, stcb, asoc->primary_destination);
5051 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5052 (asoc->stream_queue_cnt == 0)) {
5053 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5056 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5057 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5058 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5059 sctp_send_shutdown_ack(stcb,
5060 stcb->asoc.primary_destination);
5061 sctp_stop_timers_for_shutdown(stcb);
5062 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5063 stcb->sctp_ep, stcb, asoc->primary_destination);
5068 * Now here we are going to recycle net_ack for a different use...
5071 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5076 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5077 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5078 * automatically ensure that.
5080 if ((asoc->sctp_cmt_on_off == 1) &&
5081 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5082 (cmt_dac_flag == 0)) {
5083 this_sack_lowest_newack = cum_ack;
5085 if ((num_seg > 0) || (num_nr_seg > 0)) {
5086 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5087 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5089 /* JRS - Use the congestion control given in the CC module */
5090 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5092 /******************************************************************
5093 * Here we do the stuff with ECN Nonce checking.
5094 * We basically check to see if the nonce sum flag was incorrect
5095 * or if resynchronization needs to be done. Also if we catch a
5096 * misbehaving receiver we give him the kick.
5097 ******************************************************************/
5099 if (asoc->ecn_nonce_allowed) {
5100 if (asoc->nonce_sum_check) {
5101 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5102 if (asoc->nonce_wait_for_ecne == 0) {
5103 struct sctp_tmit_chunk *lchk;
5105 lchk = TAILQ_FIRST(&asoc->send_queue);
5106 asoc->nonce_wait_for_ecne = 1;
5108 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5110 asoc->nonce_wait_tsn = asoc->sending_seq;
5113 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5114 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5116 * Misbehaving peer. We need
5117 * to react to this guy
5119 asoc->ecn_allowed = 0;
5120 asoc->ecn_nonce_allowed = 0;
5125 /* See if Resynchronization Possible */
5126 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5127 asoc->nonce_sum_check = 1;
5129 * now we must calculate what the base is.
5130 * We do this based on two things, we know
5131 * the total's for all the segments
5132 * gap-acked in the SACK, its stored in
5133 * ecn_seg_sums. We also know the SACK's
5134 * nonce sum, its in nonce_sum_flag. So we
5135 * can build a truth table to back-calculate
5137 * asoc->nonce_sum_expect_base:
5139 * SACK-flag-Value Seg-Sums Base 0 0 0
5143 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5147 /* Now are we exiting loss recovery ? */
5148 if (will_exit_fast_recovery) {
5149 /* Ok, we must exit fast recovery */
5150 asoc->fast_retran_loss_recovery = 0;
5152 if ((asoc->sat_t3_loss_recovery) &&
5153 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5155 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5156 /* end satellite t3 loss recovery */
5157 asoc->sat_t3_loss_recovery = 0;
5162 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5163 if (net->will_exit_fast_recovery) {
5164 /* Ok, we must exit fast recovery */
5165 net->fast_retran_loss_recovery = 0;
5169 /* Adjust and set the new rwnd value */
5170 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5171 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5172 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5174 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5175 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5176 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5177 /* SWS sender side engages */
5178 asoc->peers_rwnd = 0;
5180 if (asoc->peers_rwnd > old_rwnd) {
5181 win_probe_recovery = 1;
5184 * Now we must setup so we have a timer up for anyone with
5190 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5191 if (win_probe_recovery && (net->window_probe)) {
5192 win_probe_recovered = 1;
5194 * Find first chunk that was used with
5195 * window probe and clear the event. Put
5196 * it back into the send queue as if has
5199 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5200 if (tp1->window_probe) {
5201 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5206 if (net->flight_size) {
5208 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5209 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5210 stcb->sctp_ep, stcb, net);
5212 if (net->window_probe) {
5213 net->window_probe = 0;
5216 if (net->window_probe) {
5218 * In window probes we must assure a timer
5219 * is still running there
5221 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5222 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5223 stcb->sctp_ep, stcb, net);
5226 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5227 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5229 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5231 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5232 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5233 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5234 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5235 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5241 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5242 (asoc->sent_queue_retran_cnt == 0) &&
5243 (win_probe_recovered == 0) &&
5246 * huh, this should not happen unless all packets are
5247 * PR-SCTP and marked to skip of course.
5249 if (sctp_fs_audit(asoc)) {
5250 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5251 net->flight_size = 0;
5253 asoc->total_flight = 0;
5254 asoc->total_flight_count = 0;
5255 asoc->sent_queue_retran_cnt = 0;
5256 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5257 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5258 sctp_flight_size_increase(tp1);
5259 sctp_total_flight_increase(stcb, tp1);
5260 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5261 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5268 /*********************************************/
5269 /* Here we perform PR-SCTP procedures */
5271 /*********************************************/
5272 /* C1. update advancedPeerAckPoint */
5273 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5274 asoc->advanced_peer_ack_point = cum_ack;
5276 /* C2. try to further move advancedPeerAckPoint ahead */
5277 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5278 struct sctp_tmit_chunk *lchk;
5279 uint32_t old_adv_peer_ack_point;
5281 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5282 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5283 /* C3. See if we need to send a Fwd-TSN */
5284 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5287 * ISSUE with ECN, see FWD-TSN processing for notes
5288 * on issues that will occur when the ECN NONCE
5289 * stuff is put into SCTP for cross checking.
5291 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5292 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5293 0xee, cum_ack, asoc->advanced_peer_ack_point,
5294 old_adv_peer_ack_point);
5296 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5299 send_forward_tsn(stcb, asoc);
5301 * ECN Nonce: Disable Nonce Sum check when
5302 * FWD TSN is sent and store resync tsn
5304 asoc->nonce_sum_check = 0;
5305 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5307 /* try to FR fwd-tsn's that get lost too */
5308 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5309 send_forward_tsn(stcb, asoc);
5314 /* Assure a timer is up */
5315 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5316 stcb->sctp_ep, stcb, lchk->whoTo);
5319 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5320 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5322 stcb->asoc.peers_rwnd,
5323 stcb->asoc.total_flight,
5324 stcb->asoc.total_output_queue_size);
5329 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5330 struct sctp_nets *netp, int *abort_flag)
5333 uint32_t cum_ack, a_rwnd;
5335 cum_ack = ntohl(cp->cumulative_tsn_ack);
5336 /* Arrange so a_rwnd does NOT change */
5337 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5339 /* Now call the express sack handling */
5340 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
/*
 * sctp_kick_prsctp_reorder_queue():
 * After a FWD-TSN has advanced a PR-SCTP stream's delivery point (see the
 * caller, which invokes this with "now kick the stream the new way"), walk
 * the per-stream reorder queue 'strmin' and move everything that has
 * become deliverable onto the socket read queue.
 *
 * Pass 1: deliver every queued entry whose SSN is at or before the
 * stream's last_sequence_delivered (wrap-aware compare).
 * Pass 2: continue normal in-order delivery, advancing
 * last_sequence_delivered one SSN at a time while the next expected SSN
 * is at the head of the queue.
 *
 * NOTE(review): this listing is an incomplete extraction -- braces, the
 * declarations/initialization of 'tt' and 'asoc', loop headers, and some
 * comment delimiters are missing. Comments below describe only the
 * visible statements; confirm structure against the original file.
 */
5344 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5345 struct sctp_stream_in *strmin)
5347 struct sctp_queued_to_read *ctl, *nctl;
5348 struct sctp_association *asoc;
/* Start from the last stream sequence number handed to the application. */
5352 tt = strmin->last_sequence_delivered;
5354 * First deliver anything prior to and including the stream no that
5357 ctl = TAILQ_FIRST(&strmin->inqueue);
/* Save the next entry before 'ctl' is unlinked from the queue. */
5359 nctl = TAILQ_NEXT(ctl, next);
/* Deliverable if SSN <= tt, using serial-number (wrap-aware) compare. */
5360 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5361 (tt == ctl->sinfo_ssn)) {
5362 /* this is deliverable now */
5363 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5364 /* subtract pending on streams */
5365 asoc->size_on_all_streams -= ctl->length;
5366 sctp_ucount_decr(asoc->cnt_on_all_streams);
5367 /* deliver it to at least the delivery-q */
5368 if (stcb->sctp_socket) {
/* Skipped data must not be reported revokable in later SACKs. */
5369 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5370 sctp_add_to_readq(stcb->sctp_ep, stcb,
5372 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5375 /* no more delivery now. */
5381 * now we must deliver things in queue the normal way if any are
/* Pass 2: resume strict in-order delivery at the next expected SSN. */
5384 tt = strmin->last_sequence_delivered + 1;
5385 ctl = TAILQ_FIRST(&strmin->inqueue);
5387 nctl = TAILQ_NEXT(ctl, next);
5388 if (tt == ctl->sinfo_ssn) {
5389 /* this is deliverable now */
5390 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5391 /* subtract pending on streams */
5392 asoc->size_on_all_streams -= ctl->length;
5393 sctp_ucount_decr(asoc->cnt_on_all_streams);
5394 /* deliver it to at least the delivery-q */
/* Unlike pass 1, in-order delivery advances the stream's cursor. */
5395 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5396 if (stcb->sctp_socket) {
5397 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5398 sctp_add_to_readq(stcb->sctp_ep, stcb,
5400 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Recompute the next expected SSN and keep draining the queue. */
5403 tt = strmin->last_sequence_delivered + 1;
/*
 * sctp_flush_reassm_for_str_seq():
 * When a peer's FWD-TSN tells us to skip message (stream, seq), remove
 * all of that message's fragments from the reassembly queue, update the
 * tsn_last_delivered / pdapi bookkeeping, and -- for ordered data --
 * drag the stream's last_sequence_delivered forward past the skipped
 * message so later in-order delivery is not blocked.
 *
 * NOTE(review): this listing is an incomplete extraction -- the loop
 * header, several braces/else arms, and the tail of the final else-if
 * (SSN past the purge point, presumably a loop exit) are missing.
 * Comments describe only the visible statements.
 */
5412 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5413 struct sctp_association *asoc,
5414 uint16_t stream, uint16_t seq)
5416 struct sctp_tmit_chunk *chk, *at;
5418 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5419 /* For each one on here see if we need to toss it */
5421 * For now large messages held on the reasmqueue that are
5422 * complete will be tossed too. We could in theory do more
5423 * work to spin through and stop after dumping one msg aka
5424 * seeing the start of a new msg at the head, and call the
5425 * delivery function... to see if it can be delivered... But
5426 * for now we just dump everything on the queue.
5428 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Save the successor before 'chk' may be unlinked below. */
5430 at = TAILQ_NEXT(chk, sctp_next);
5432 * Do not toss it if on a different stream or marked
5433 * for unordered delivery in which case the stream
5434 * sequence number has no meaning.
5436 if ((chk->rec.data.stream_number != stream) ||
5437 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
/* Fragment belongs to the message being purged. */
5441 if (chk->rec.data.stream_seq == seq) {
5442 /* It needs to be tossed */
5443 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* Track the highest skipped TSN and its pdapi identity. */
5444 if (compare_with_wrap(chk->rec.data.TSN_seq,
5445 asoc->tsn_last_delivered, MAX_TSN)) {
5446 asoc->tsn_last_delivered =
5447 chk->rec.data.TSN_seq;
5448 asoc->str_of_pdapi =
5449 chk->rec.data.stream_number;
5450 asoc->ssn_of_pdapi =
5451 chk->rec.data.stream_seq;
5452 asoc->fragment_flags =
5453 chk->rec.data.rcv_flags;
/* Reassembly-queue accounting for the removed fragment. */
5455 asoc->size_on_reasm_queue -= chk->send_size;
5456 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5458 /* Clear up any stream problem */
5459 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5460 SCTP_DATA_UNORDERED &&
5461 (compare_with_wrap(chk->rec.data.stream_seq,
5462 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5465 * We must dump forward this streams
5466 * sequence number if the chunk is
5467 * not unordered that is being
5468 * skipped. There is a chance that
5469 * if the peer does not include the
5470 * last fragment in its FWD-TSN we
5471 * WILL have a problem here since
5472 * you would have a partial chunk in
5473 * queue that may not be
5474 * deliverable. Also if a Partial
5475 * delivery API as started the user
5476 * may get a partial chunk. The next
5477 * read returning a new chunk...
5478 * really ugly but I see no way
5479 * around it! Maybe a notify??
5481 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5482 chk->rec.data.stream_seq;
/* Free the fragment's mbuf chain and return the chunk descriptor. */
5485 sctp_m_freem(chk->data);
5488 sctp_free_a_chunk(stcb, chk);
5489 } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5491 * If the stream_seq is > than the purging
5503 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5504 struct sctp_forward_tsn_chunk *fwd,
5505 int *abort_flag, struct mbuf *m, int offset)
5508 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5509 * forward TSN, when the SACK comes back that acknowledges the
5510 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5511 * get quite tricky since we may have sent more data interveneing
5512 * and must carefully account for what the SACK says on the nonce
5513 * and any gaps that are reported. This work will NOT be done here,
5514 * but I note it here since it is really related to PR-SCTP and
5518 /* The pr-sctp fwd tsn */
5520 * here we will perform all the data receiver side steps for
5521 * processing FwdTSN, as required in by pr-sctp draft:
5523 * Assume we get FwdTSN(x):
5525 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5526 * others we have 3) examine and update re-ordering queue on
5527 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5528 * report where we are.
5530 struct sctp_association *asoc;
5531 uint32_t new_cum_tsn, gap;
5532 unsigned int i, fwd_sz, cumack_set_flag, m_size;
5534 struct sctp_stream_in *strm;
5535 struct sctp_tmit_chunk *chk, *at;
5536 struct sctp_queued_to_read *ctl, *sv;
5538 cumack_set_flag = 0;
5540 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5541 SCTPDBG(SCTP_DEBUG_INDATA1,
5542 "Bad size too small/big fwd-tsn\n");
5545 m_size = (stcb->asoc.mapping_array_size << 3);
5546 /*************************************************************/
5547 /* 1. Here we update local cumTSN and shift the bitmap array */
5548 /*************************************************************/
5549 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5551 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5552 asoc->cumulative_tsn == new_cum_tsn) {
5553 /* Already got there ... */
5557 * now we know the new TSN is more advanced, let's find the actual
5560 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5561 asoc->cumulative_tsn = new_cum_tsn;
5562 if (gap >= m_size) {
5563 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5567 * out of range (of single byte chunks in the rwnd I
5568 * give out). This must be an attacker.
5571 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5572 0, M_DONTWAIT, 1, MT_DATA);
5574 struct sctp_paramhdr *ph;
5577 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5578 (sizeof(uint32_t) * 3);
5579 ph = mtod(oper, struct sctp_paramhdr *);
5580 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5581 ph->param_length = htons(SCTP_BUF_LEN(oper));
5582 ippp = (uint32_t *) (ph + 1);
5583 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5585 *ippp = asoc->highest_tsn_inside_map;
5587 *ippp = new_cum_tsn;
5589 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5590 sctp_abort_an_association(stcb->sctp_ep, stcb,
5591 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5594 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5596 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5597 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5598 asoc->highest_tsn_inside_map = new_cum_tsn;
5600 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5601 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5603 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5604 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5606 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5608 SCTP_TCB_LOCK_ASSERT(stcb);
5609 for (i = 0; i <= gap; i++) {
5610 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5611 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5612 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5613 if (compare_with_wrap(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5614 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5619 /*************************************************************/
5620 /* 2. Clear up re-assembly queue */
5621 /*************************************************************/
5623 * First service it if pd-api is up, just in case we can progress it
5626 if (asoc->fragmented_delivery_inprogress) {
5627 sctp_service_reassembly(stcb, asoc);
5629 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5630 /* For each one on here see if we need to toss it */
5632 * For now large messages held on the reasmqueue that are
5633 * complete will be tossed too. We could in theory do more
5634 * work to spin through and stop after dumping one msg aka
5635 * seeing the start of a new msg at the head, and call the
5636 * delivery function... to see if it can be delivered... But
5637 * for now we just dump everything on the queue.
5639 chk = TAILQ_FIRST(&asoc->reasmqueue);
5641 at = TAILQ_NEXT(chk, sctp_next);
5642 if ((compare_with_wrap(new_cum_tsn,
5643 chk->rec.data.TSN_seq, MAX_TSN)) ||
5644 (new_cum_tsn == chk->rec.data.TSN_seq)) {
5645 /* It needs to be tossed */
5646 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5647 if (compare_with_wrap(chk->rec.data.TSN_seq,
5648 asoc->tsn_last_delivered, MAX_TSN)) {
5649 asoc->tsn_last_delivered =
5650 chk->rec.data.TSN_seq;
5651 asoc->str_of_pdapi =
5652 chk->rec.data.stream_number;
5653 asoc->ssn_of_pdapi =
5654 chk->rec.data.stream_seq;
5655 asoc->fragment_flags =
5656 chk->rec.data.rcv_flags;
5658 asoc->size_on_reasm_queue -= chk->send_size;
5659 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5661 /* Clear up any stream problem */
5662 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5663 SCTP_DATA_UNORDERED &&
5664 (compare_with_wrap(chk->rec.data.stream_seq,
5665 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5668 * We must dump forward this streams
5669 * sequence number if the chunk is
5670 * not unordered that is being
5671 * skipped. There is a chance that
5672 * if the peer does not include the
5673 * last fragment in its FWD-TSN we
5674 * WILL have a problem here since
5675 * you would have a partial chunk in
5676 * queue that may not be
5677 * deliverable. Also if a Partial
5678 * delivery API as started the user
5679 * may get a partial chunk. The next
5680 * read returning a new chunk...
5681 * really ugly but I see no way
5682 * around it! Maybe a notify??
5684 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5685 chk->rec.data.stream_seq;
5688 sctp_m_freem(chk->data);
5691 sctp_free_a_chunk(stcb, chk);
5694 * Ok we have gone beyond the end of the
5702 /*******************************************************/
5703 /* 3. Update the PR-stream re-ordering queues and fix */
5704 /* delivery issues as needed. */
5705 /*******************************************************/
5706 fwd_sz -= sizeof(*fwd);
5709 unsigned int num_str;
5710 struct sctp_strseq *stseq, strseqbuf;
5712 offset += sizeof(*fwd);
5714 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5715 num_str = fwd_sz / sizeof(struct sctp_strseq);
5716 for (i = 0; i < num_str; i++) {
5719 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5720 sizeof(struct sctp_strseq),
5721 (uint8_t *) & strseqbuf);
5722 offset += sizeof(struct sctp_strseq);
5723 if (stseq == NULL) {
5727 st = ntohs(stseq->stream);
5729 st = ntohs(stseq->sequence);
5730 stseq->sequence = st;
5735 * Ok we now look for the stream/seq on the read
5736 * queue where its not all delivered. If we find it
5737 * we transmute the read entry into a PDI_ABORTED.
5739 if (stseq->stream >= asoc->streamincnt) {
5740 /* screwed up streams, stop! */
5743 if ((asoc->str_of_pdapi == stseq->stream) &&
5744 (asoc->ssn_of_pdapi == stseq->sequence)) {
5746 * If this is the one we were partially
5747 * delivering now then we no longer are.
5748 * Note this will change with the reassembly
5751 asoc->fragmented_delivery_inprogress = 0;
5753 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5754 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5755 if ((ctl->sinfo_stream == stseq->stream) &&
5756 (ctl->sinfo_ssn == stseq->sequence)) {
5757 str_seq = (stseq->stream << 16) | stseq->sequence;
5759 ctl->pdapi_aborted = 1;
5760 sv = stcb->asoc.control_pdapi;
5761 stcb->asoc.control_pdapi = ctl;
5762 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5764 SCTP_PARTIAL_DELIVERY_ABORTED,
5766 SCTP_SO_NOT_LOCKED);
5767 stcb->asoc.control_pdapi = sv;
5769 } else if ((ctl->sinfo_stream == stseq->stream) &&
5770 (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5771 /* We are past our victim SSN */
5775 strm = &asoc->strmin[stseq->stream];
5776 if (compare_with_wrap(stseq->sequence,
5777 strm->last_sequence_delivered, MAX_SEQ)) {
5778 /* Update the sequence number */
5779 strm->last_sequence_delivered =
5782 /* now kick the stream the new way */
5783 /* sa_ignore NO_NULL_CHK */
5784 sctp_kick_prsctp_reorder_queue(stcb, strm);
5786 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5789 * Now slide thing forward.
5791 sctp_slide_mapping_arrays(stcb);
5793 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5794 /* now lets kick out and check for more fragmented delivery */
5795 /* sa_ignore NO_NULL_CHK */
5796 sctp_deliver_reasm_check(stcb, &stcb->asoc);