2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Recompute and publish this association's advertised receive window
 * (asoc->my_rwnd) from the current socket-buffer/queue state, by
 * delegating the actual math to sctp_calc_rwnd().
 * NOTE(review): this extract is missing lines (return type, braces);
 * visible code kept byte-identical.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
/*
 * Compute the receive window this association could advertise:
 *  - if nothing is buffered anywhere, grant the full socket limit
 *    (never below SCTP_MINIMAL_RWND);
 *  - otherwise start from the actual socket receive-buffer space and
 *    subtract data still held on the reassembly queue, the per-stream
 *    queues, and the control-message overhead (my_rwnd_control_len);
 *  - finally clamp tiny windows (silly-window avoidance).
 * NOTE(review): declarations of 'calc', the return statements, and the
 * brace structure fall in gaps of this extract; visible code kept
 * byte-identical.
 */
65 /* Calculate what the rwnd would be */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
/* No socket means no window to advertise. */
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
/* sctp_sbspace_sub() presumably saturates at 0 rather than wrapping -- TODO confirm. */
94 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
95 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
101 /* what is the overhead of all these rwnd's */
102 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
104 * If the window gets too small due to ctrl-stuff, reduce it to 1,
105 * even it is 0. SWS engaged
107 if (calc < stcb->asoc.my_rwnd_control_len) {
116 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and fully initialize a read-queue entry (sctp_queued_to_read)
 * from the fields of an incoming packet.  Takes a reference on 'net'
 * (stored as whoFrom) for the lifetime of the entry.  Returns NULL if
 * the readq allocation fails.
 * NOTE(review): the tail of the parameter list (including the mbuf
 * argument 'dm' assigned to ->data below) falls in a gap of this
 * extract -- confirm against the full source.  Visible code kept
 * byte-identical.
 */
118 struct sctp_queued_to_read *
119 sctp_build_readq_entry(struct sctp_tcb *stcb,
120 struct sctp_nets *net,
121 uint32_t tsn, uint32_t ppid,
122 uint32_t context, uint16_t stream_no,
123 uint16_t stream_seq, uint8_t flags,
126 struct sctp_queued_to_read *read_queue_e = NULL;
128 sctp_alloc_a_readq(stcb, read_queue_e);
129 if (read_queue_e == NULL) {
/* Populate sinfo fields handed to the user via recvmsg cmsg data. */
132 read_queue_e->sinfo_stream = stream_no;
133 read_queue_e->sinfo_ssn = stream_seq;
/* rcv_flags live in the upper byte of sinfo_flags. */
134 read_queue_e->sinfo_flags = (flags << 8);
135 read_queue_e->sinfo_ppid = ppid;
136 read_queue_e->sinfo_context = stcb->asoc.context;
137 read_queue_e->sinfo_timetolive = 0;
138 read_queue_e->sinfo_tsn = tsn;
139 read_queue_e->sinfo_cumtsn = tsn;
140 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
141 read_queue_e->whoFrom = net;
142 read_queue_e->length = 0;
/* Hold a reference on the source net as long as whoFrom points at it. */
143 atomic_add_int(&net->ref_count, 1);
144 read_queue_e->data = dm;
145 read_queue_e->spec_flags = 0;
146 read_queue_e->tail_mbuf = NULL;
147 read_queue_e->aux_data = NULL;
148 read_queue_e->stcb = stcb;
149 read_queue_e->port_from = stcb->rport;
150 read_queue_e->do_not_ref_stcb = 0;
151 read_queue_e->end_added = 0;
152 read_queue_e->some_taken = 0;
153 read_queue_e->pdapi_aborted = 0;
155 return (read_queue_e);
160 * Build out our readq entry based on the incoming packet.
/*
 * Like sctp_build_readq_entry(), but sourced from a queued transmit
 * chunk (reassembly-queue element): copies the chunk's stream/TSN/PPID
 * record into a fresh read-queue entry, references chk->whoTo as the
 * origin, and adopts chk->data as the payload.  Returns NULL if the
 * readq allocation fails.
 * NOTE(review): braces and the allocation-failure return fall in gaps
 * of this extract; visible code kept byte-identical.
 */
162 static struct sctp_queued_to_read *
163 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
164 struct sctp_tmit_chunk *chk)
166 struct sctp_queued_to_read *read_queue_e = NULL;
168 sctp_alloc_a_readq(stcb, read_queue_e);
169 if (read_queue_e == NULL) {
172 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
173 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
/* rcv_flags live in the upper byte of sinfo_flags. */
174 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
175 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
176 read_queue_e->sinfo_context = stcb->asoc.context;
177 read_queue_e->sinfo_timetolive = 0;
178 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
179 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
180 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
181 read_queue_e->whoFrom = chk->whoTo;
182 read_queue_e->aux_data = NULL;
183 read_queue_e->length = 0;
/* Hold a reference on the net the chunk arrived from. */
184 atomic_add_int(&chk->whoTo->ref_count, 1);
185 read_queue_e->data = chk->data;
186 read_queue_e->tail_mbuf = NULL;
187 read_queue_e->stcb = stcb;
188 read_queue_e->port_from = stcb->rport;
189 read_queue_e->spec_flags = 0;
190 read_queue_e->do_not_ref_stcb = 0;
191 read_queue_e->end_added = 0;
192 read_queue_e->some_taken = 0;
193 read_queue_e->pdapi_aborted = 0;
195 return (read_queue_e);
/*
 * Build an mbuf holding a cmsghdr (level IPPROTO_SCTP) followed by the
 * receive-side sndrcvinfo for delivery to the application.  If the
 * endpoint has SCTP_PCB_FLAGS_EXT_RCVINFO enabled, an extended
 * sctp_extrcvinfo cmsg (SCTP_EXTRCV) is emitted instead of the normal
 * SCTP_SNDRCV one.  Skipped entirely when the user has turned the
 * RECVDATAIOEVNT feature off.
 * NOTE(review): return type, local declarations (ret, cmh, len),
 * cmsg_len assignment, the if/else skeleton and the return fall in
 * gaps of this extract; visible code kept byte-identical.
 */
200 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
201 struct sctp_sndrcvinfo *sinfo)
203 struct sctp_sndrcvinfo *outinfo;
207 int use_extended = 0;
209 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
210 /* user does not want the sndrcv ctl */
213 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
215 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
217 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
/* Allocate an mbuf big enough for header + payload; non-blocking. */
221 ret = sctp_get_mbuf_for_msg(len,
222 0, M_DONTWAIT, 1, MT_DATA);
228 /* We need a CMSG header followed by the struct */
229 cmh = mtod(ret, struct cmsghdr *);
230 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
231 cmh->cmsg_level = IPPROTO_SCTP;
233 cmh->cmsg_type = SCTP_EXTRCV;
/* memcpy of 'len' covers the extended struct in the EXT_RCVINFO case. */
235 memcpy(outinfo, sinfo, len);
237 cmh->cmsg_type = SCTP_SNDRCV;
241 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
/*
 * Same contract as sctp_build_ctl_nchunk(), but the cmsghdr + sndrcvinfo
 * (or extrcvinfo) is written into a SCTP_MALLOC'd character buffer
 * rather than an mbuf.  Skipped when RECVDATAIOEVNT is off.
 * NOTE(review): the function's tail (cmsg_len setup / return of the
 * buffer) is not visible in this extract; visible code kept
 * byte-identical.  Caller presumably owns and frees 'buf' -- confirm
 * against the full source.
 */
247 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
249 struct sctp_sndrcvinfo *sinfo)
251 struct sctp_sndrcvinfo *outinfo;
255 int use_extended = 0;
257 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
258 /* user does not want the sndrcv ctl */
261 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
263 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
265 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
267 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
272 /* We need a CMSG header followed by the struct */
273 cmh = (struct cmsghdr *)buf;
274 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
275 cmh->cmsg_level = IPPROTO_SCTP;
277 cmh->cmsg_type = SCTP_EXTRCV;
279 memcpy(outinfo, sinfo, len);
281 cmh->cmsg_type = SCTP_SNDRCV;
/*
 * Move 'tsn' from the renegable mapping array to the non-renegable
 * (nr) mapping array once its data has been committed to the socket:
 * after this it may be reported as an NR gap and must not be revoked.
 * No-op when sctp_do_drain is disabled (nothing is ever renegued) or
 * when the TSN is already at/behind the cumulative ack.  Also maintains
 * highest_tsn_inside_nr_map and walks highest_tsn_inside_map back down
 * if the moved TSN was the current highest.
 * NOTE(review): braces/returns fall in gaps of this extract; the
 * printf/panic path below is presumably under a debug #ifdef (e.g.
 * INVARIANTS) -- confirm against the full source.  Visible code kept
 * byte-identical.
 */
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
292 uint32_t gap, i, cumackp1;
295 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
298 cumackp1 = asoc->cumulative_tsn + 1;
299 if (compare_with_wrap(cumackp1, tsn, MAX_TSN)) {
301 * this tsn is behind the cum ack and thus we don't need to
302 * worry about it being moved from one to the other.
306 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
/* Sanity: the TSN we are marking must already be in the mapping array. */
307 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
308 printf("gap:%x tsn:%x\n", gap, tsn);
309 sctp_print_mapping_array(asoc);
311 panic("Things are really messed up now!!");
/* Atomically (w.r.t. the maps) transfer the bit: set in nr, clear in regular. */
314 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
315 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
316 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
317 asoc->highest_tsn_inside_nr_map = tsn;
319 if (tsn == asoc->highest_tsn_inside_map) {
320 /* We must back down to see what the new highest is */
321 for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
322 (i == asoc->mapping_array_base_tsn)); i--) {
323 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
324 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
325 asoc->highest_tsn_inside_map = i;
/* Nothing left set in the map: highest falls back to base_tsn - 1. */
331 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
338 * We are delivering currently from the reassembly queue. We must continue to
339 * deliver until we either: 1) run out of space. 2) run out of sequential
340 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Deliver as much as possible from the reassembly queue to the socket:
 * walks the queue in TSN order, creating a readq entry at each FIRST
 * fragment (partial-delivery / PD-API) and appending MIDDLE/LAST
 * fragments to control_pdapi, until a TSN gap, an out-of-order stream
 * sequence, or the LAST fragment is hit.  If the socket is gone, the
 * whole queue is drained and freed instead.  After a complete message,
 * also drains any now-deliverable in-order entries from that stream's
 * inqueue.
 * NOTE(review): this extract is missing many lines (loop headers,
 * braces, 'end' computation, error/abort paths); the panic vs
 * SCTP_PRINTF branches below are presumably the INVARIANTS /
 * non-INVARIANTS variants of the same check -- confirm against the
 * full source.  Visible code kept byte-identical.
 */
343 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
345 struct sctp_tmit_chunk *chk;
351 struct sctp_queued_to_read *control, *ctl, *ctlat;
356 cntDel = stream_no = 0;
357 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
358 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
359 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
360 /* socket above is long gone or going.. */
/* Nobody will read again: drop PD-API state and free every queued chunk. */
362 asoc->fragmented_delivery_inprogress = 0;
363 chk = TAILQ_FIRST(&asoc->reasmqueue);
365 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
366 asoc->size_on_reasm_queue -= chk->send_size;
367 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
369 * Lose the data pointer, since its in the socket
373 sctp_m_freem(chk->data);
376 /* Now free the address and data */
377 sctp_free_a_chunk(stcb, chk);
378 /* sa_ignore FREED_MEMORY */
379 chk = TAILQ_FIRST(&asoc->reasmqueue);
383 SCTP_TCB_LOCK_ASSERT(stcb);
385 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Stop at the first TSN gap: only strictly sequential TSNs can be delivered. */
389 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
390 /* Can't deliver more :< */
393 stream_no = chk->rec.data.stream_number;
394 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
395 if (nxt_todel != chk->rec.data.stream_seq &&
396 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
398 * Not the next sequence to deliver in its stream OR
/* FIRST fragment: open a new readq entry and remember it for appends. */
403 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
405 control = sctp_build_readq_entry_chk(stcb, chk);
406 if (control == NULL) {
410 /* save it off for our future deliveries */
411 stcb->asoc.control_pdapi = control;
412 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
416 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
417 sctp_add_to_readq(stcb->sctp_ep,
418 stcb, control, &stcb->sctp_socket->so_rcv, end,
419 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment: append its data to the in-progress PD-API entry. */
422 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
426 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
427 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
428 stcb->asoc.control_pdapi,
429 chk->data, end, chk->rec.data.TSN_seq,
430 &stcb->sctp_socket->so_rcv)) {
432 * something is very wrong, either
433 * control_pdapi is NULL, or the tail_mbuf
434 * is corrupt, or there is a EOM already on
437 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
441 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
442 panic("This should not happen control_pdapi NULL?");
444 /* if we did not panic, it was a EOM */
445 panic("Bad chunking ??");
447 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
448 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
450 SCTP_PRINTF("Bad chunking ??\n");
451 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
459 /* pull it we did it */
460 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
461 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* Message complete: close the PD-API and advance the stream sequence. */
462 asoc->fragmented_delivery_inprogress = 0;
463 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
464 asoc->strmin[stream_no].last_sequence_delivered++;
466 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
467 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
469 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
471 * turn the flag back on since we just delivered
474 asoc->fragmented_delivery_inprogress = 1;
/* Record what was just delivered (used by later validation/audits). */
476 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
477 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
478 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
479 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
481 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
482 asoc->size_on_reasm_queue -= chk->send_size;
483 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
484 /* free up the chk */
486 sctp_free_a_chunk(stcb, chk);
488 if (asoc->fragmented_delivery_inprogress == 0) {
490 * Now lets see if we can deliver the next one on
493 struct sctp_stream_in *strm;
495 strm = &asoc->strmin[stream_no];
496 nxt_todel = strm->last_sequence_delivered + 1;
497 ctl = TAILQ_FIRST(&strm->inqueue);
498 if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
499 while (ctl != NULL) {
500 /* Deliver more if we can. */
501 if (nxt_todel == ctl->sinfo_ssn) {
502 ctlat = TAILQ_NEXT(ctl, next);
503 TAILQ_REMOVE(&strm->inqueue, ctl, next);
504 asoc->size_on_all_streams -= ctl->length;
505 sctp_ucount_decr(asoc->cnt_on_all_streams);
506 strm->last_sequence_delivered++;
507 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
508 sctp_add_to_readq(stcb->sctp_ep, stcb,
510 &stcb->sctp_socket->so_rcv, 1,
511 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
516 nxt_todel = strm->last_sequence_delivered + 1;
521 /* sa_ignore FREED_MEMORY */
522 chk = TAILQ_FIRST(&asoc->reasmqueue);
527 * Queue the chunk either right into the socket buffer if it is the next one
528 * to go OR put it in the correct place in the delivery queue. If we do
529 * append to the so_buf, keep doing so until we are out of order. One big
530 * question still remains, what to do when the socket buffer is FULL??
/*
 * Place a freshly-built readq entry either directly on the socket
 * receive buffer (when its SSN is the next to deliver -- and then keep
 * draining any newly in-order entries) or at the correct SSN-sorted
 * position in the stream's inqueue.  A duplicate or backwards SSN from
 * the peer is a protocol violation: the association is aborted with a
 * PROTOCOL_VIOLATION operational error carrying TSN and stream/SSN,
 * and *abort_flag is presumably set on that path -- confirm against
 * the full source.
 * NOTE(review): this extract is missing lines (declarations of
 * nxt_todel/oper/ippp, 'queue_needed' logic, braces, early returns);
 * visible code kept byte-identical.
 */
533 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
534 struct sctp_queued_to_read *control, int *abort_flag)
537 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
538 * all the data in one stream this could happen quite rapidly. One
539 * could use the TSN to keep track of things, but this scheme breaks
540 * down in the other type of stream useage that could occur. Send a
541 * single msg to stream 0, send 4Billion messages to stream 1, now
542 * send a message to stream 0. You have a situation where the TSN
543 * has wrapped but not in the stream. Is this worth worrying about
544 * or should we just change our queue sort at the bottom to be by
547 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
548 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
549 * assignment this could happen... and I don't see how this would be
550 * a violation. So for now I am undecided an will leave the sort by
551 * SSN alone. Maybe a hybred approach is the answer
554 struct sctp_stream_in *strm;
555 struct sctp_queued_to_read *at;
/* Account the entry against the all-streams totals up front. */
561 asoc->size_on_all_streams += control->length;
562 sctp_ucount_incr(asoc->cnt_on_all_streams);
563 strm = &asoc->strmin[control->sinfo_stream];
564 nxt_todel = strm->last_sequence_delivered + 1;
565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
566 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
568 SCTPDBG(SCTP_DEBUG_INDATA1,
569 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
570 (uint32_t) control->sinfo_stream,
571 (uint32_t) strm->last_sequence_delivered,
572 (uint32_t) nxt_todel);
/* SSN at or behind what we already delivered => duplicate/replay: abort. */
573 if (compare_with_wrap(strm->last_sequence_delivered,
574 control->sinfo_ssn, MAX_SEQ) ||
575 (strm->last_sequence_delivered == control->sinfo_ssn)) {
576 /* The incoming sseq is behind where we last delivered? */
577 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
578 control->sinfo_ssn, strm->last_sequence_delivered);
581 * throw it in the stream so it gets cleaned up in
582 * association destruction
584 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Build the PROTOCOL_VIOLATION error cause: loc-code, TSN, stream<<16|SSN. */
585 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
586 0, M_DONTWAIT, 1, MT_DATA);
588 struct sctp_paramhdr *ph;
591 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
592 (sizeof(uint32_t) * 3);
593 ph = mtod(oper, struct sctp_paramhdr *);
594 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
595 ph->param_length = htons(SCTP_BUF_LEN(oper));
596 ippp = (uint32_t *) (ph + 1);
597 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
599 *ippp = control->sinfo_tsn;
601 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
603 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
604 sctp_abort_an_association(stcb->sctp_ep, stcb,
605 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* In-order arrival: hand it straight to the socket, bypassing the inqueue. */
611 if (nxt_todel == control->sinfo_ssn) {
612 /* can be delivered right away? */
613 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
614 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
616 /* EY it wont be queued if it could be delivered directly */
618 asoc->size_on_all_streams -= control->length;
619 sctp_ucount_decr(asoc->cnt_on_all_streams);
620 strm->last_sequence_delivered++;
622 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
623 sctp_add_to_readq(stcb->sctp_ep, stcb,
625 &stcb->sctp_socket->so_rcv, 1,
626 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* This delivery may have unblocked queued entries -- drain while in order. */
627 control = TAILQ_FIRST(&strm->inqueue);
628 while (control != NULL) {
630 nxt_todel = strm->last_sequence_delivered + 1;
631 if (nxt_todel == control->sinfo_ssn) {
632 at = TAILQ_NEXT(control, next);
633 TAILQ_REMOVE(&strm->inqueue, control, next);
634 asoc->size_on_all_streams -= control->length;
635 sctp_ucount_decr(asoc->cnt_on_all_streams);
636 strm->last_sequence_delivered++;
638 * We ignore the return of deliver_data here
639 * since we always can hold the chunk on the
640 * d-queue. And we have a finite number that
641 * can be delivered from the strq.
643 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
644 sctp_log_strm_del(control, NULL,
645 SCTP_STR_LOG_FROM_IMMED_DEL);
647 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
648 sctp_add_to_readq(stcb->sctp_ep, stcb,
650 &stcb->sctp_socket->so_rcv, 1,
651 SCTP_READ_LOCK_NOT_HELD,
661 * Ok, we did not deliver this guy, find the correct place
662 * to put it on the queue.
664 if ((compare_with_wrap(asoc->cumulative_tsn,
665 control->sinfo_tsn, MAX_TSN)) ||
666 (control->sinfo_tsn == asoc->cumulative_tsn)) {
669 if (TAILQ_EMPTY(&strm->inqueue)) {
671 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
672 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
674 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Non-empty queue: keep inqueue sorted by SSN (see FIX-ME above re wrap). */
676 TAILQ_FOREACH(at, &strm->inqueue, next) {
677 if (compare_with_wrap(at->sinfo_ssn,
678 control->sinfo_ssn, MAX_SEQ)) {
680 * one in queue is bigger than the
681 * new one, insert before this one
683 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
684 sctp_log_strm_del(control, at,
685 SCTP_STR_LOG_FROM_INSERT_MD);
687 TAILQ_INSERT_BEFORE(at, control, next);
689 } else if (at->sinfo_ssn == control->sinfo_ssn) {
691 * Gak, He sent me a duplicate str
695 * foo bar, I guess I will just free
696 * this new guy, should we abort
697 * too? FIX ME MAYBE? Or it COULD be
698 * that the SSN's have wrapped.
699 * Maybe I should compare to TSN
700 * somehow... sigh for now just blow
/* Duplicate SSN already queued: drop the new entry and undo the accounting. */
705 sctp_m_freem(control->data);
706 control->data = NULL;
707 asoc->size_on_all_streams -= control->length;
708 sctp_ucount_decr(asoc->cnt_on_all_streams);
709 if (control->whoFrom)
710 sctp_free_remote_addr(control->whoFrom);
711 control->whoFrom = NULL;
712 sctp_free_a_readq(stcb, control);
715 if (TAILQ_NEXT(at, next) == NULL) {
717 * We are at the end, insert
720 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
721 sctp_log_strm_del(control, at,
722 SCTP_STR_LOG_FROM_INSERT_TL);
724 TAILQ_INSERT_AFTER(&strm->inqueue,
735 * Returns two things: You get the total size of the deliverable parts of the
736 * first fragmented message on the reassembly queue. And you get a 1 back if
737 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Report on the first fragmented message sitting on the reassembly
 * queue: accumulates the deliverable (TSN-sequential) size into
 * *t_size and returns (per the header comment above) 1 if the whole
 * message -- FIRST through LAST fragment -- is present, 0 otherwise.
 * NOTE(review): return statements, *t_size initialization, the tsn
 * increment inside the loop, and braces fall in gaps of this extract;
 * visible code kept byte-identical.
 */
740 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
742 struct sctp_tmit_chunk *chk;
746 chk = TAILQ_FIRST(&asoc->reasmqueue);
748 /* nothing on the queue */
751 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
752 /* Not a first on the queue */
755 tsn = chk->rec.data.TSN_seq;
/* A TSN gap means the remainder of the message has not arrived yet. */
757 if (tsn != chk->rec.data.TSN_seq) {
760 *t_size += chk->send_size;
761 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
765 chk = TAILQ_NEXT(chk, sctp_next);
/*
 * Decide whether (partial) delivery from the reassembly queue can
 * start or continue, then drive sctp_service_reassembly().  Delivery
 * of a new message starts only when the head chunk is a FIRST fragment
 * that is next-in-stream (or unordered) AND either the whole message
 * is present or enough bytes (>= the partial-delivery point, capped by
 * the socket limit) have accumulated.  If a fragmented delivery is
 * already in progress, reassembly service is simply resumed, and the
 * post-PD-API stream drain is re-triggered when it completes.
 * NOTE(review): braces/returns, the nxt_todel assignment target, the
 * empty-queue reset path, and the asoc->str_of_pdapi assignment head
 * fall in gaps of this extract; visible code kept byte-identical.
 */
771 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
773 struct sctp_tmit_chunk *chk;
775 uint32_t tsize, pd_point;
778 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: re-zero the accounting to keep it consistent. */
781 asoc->size_on_reasm_queue = 0;
782 asoc->cnt_on_reasm_queue = 0;
785 if (asoc->fragmented_delivery_inprogress == 0) {
787 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
788 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
789 (nxt_todel == chk->rec.data.stream_seq ||
790 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
792 * Yep the first one is here and its ok to deliver
/* Partial-delivery threshold: endpoint setting, capped by the socket limit. */
795 if (stcb->sctp_socket) {
796 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
797 stcb->sctp_ep->partial_delivery_point);
799 pd_point = stcb->sctp_ep->partial_delivery_point;
801 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
804 * Yes, we setup to start reception, by
805 * backing down the TSN just in case we
806 * can't deliver. If we
/* Arm the PD-API: record the message's identity for later validation. */
808 asoc->fragmented_delivery_inprogress = 1;
809 asoc->tsn_last_delivered =
810 chk->rec.data.TSN_seq - 1;
812 chk->rec.data.stream_number;
813 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
814 asoc->pdapi_ppid = chk->rec.data.payloadtype;
815 asoc->fragment_flags = chk->rec.data.rcv_flags;
816 sctp_service_reassembly(stcb, asoc);
821 * Service re-assembly will deliver stream data queued at
822 * the end of fragmented delivery.. but it wont know to go
823 * back and call itself again... we do that here with the
826 sctp_service_reassembly(stcb, asoc);
827 if (asoc->fragmented_delivery_inprogress == 0) {
829 * finished our Fragmented delivery, could be more
838 * Dump onto the re-assembly queue, in its proper place. After dumping on the
839 * queue, see if anthing can be delivered. If so pull it off (or as much as
840 * we can. If we run out of space then we must dump what we can and set the
841 * appropriate flag to say we queued what we could.
844 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
845 struct sctp_tmit_chunk *chk, int *abort_flag)
848 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
850 struct sctp_tmit_chunk *at, *prev, *next;
853 cum_ackp1 = asoc->tsn_last_delivered + 1;
854 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
855 /* This is the first one on the queue */
856 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
858 * we do not check for delivery of anything when only one
861 asoc->size_on_reasm_queue = chk->send_size;
862 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
863 if (chk->rec.data.TSN_seq == cum_ackp1) {
864 if (asoc->fragmented_delivery_inprogress == 0 &&
865 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
866 SCTP_DATA_FIRST_FRAG) {
868 * An empty queue, no delivery inprogress,
869 * we hit the next one and it does NOT have
870 * a FIRST fragment mark.
872 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
873 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
874 0, M_DONTWAIT, 1, MT_DATA);
877 struct sctp_paramhdr *ph;
881 sizeof(struct sctp_paramhdr) +
882 (sizeof(uint32_t) * 3);
883 ph = mtod(oper, struct sctp_paramhdr *);
885 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
886 ph->param_length = htons(SCTP_BUF_LEN(oper));
887 ippp = (uint32_t *) (ph + 1);
888 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
890 *ippp = chk->rec.data.TSN_seq;
892 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
895 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
896 sctp_abort_an_association(stcb->sctp_ep, stcb,
897 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
899 } else if (asoc->fragmented_delivery_inprogress &&
900 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
902 * We are doing a partial delivery and the
903 * NEXT chunk MUST be either the LAST or
904 * MIDDLE fragment NOT a FIRST
906 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
907 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
908 0, M_DONTWAIT, 1, MT_DATA);
910 struct sctp_paramhdr *ph;
914 sizeof(struct sctp_paramhdr) +
915 (3 * sizeof(uint32_t));
916 ph = mtod(oper, struct sctp_paramhdr *);
918 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
919 ph->param_length = htons(SCTP_BUF_LEN(oper));
920 ippp = (uint32_t *) (ph + 1);
921 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
923 *ippp = chk->rec.data.TSN_seq;
925 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
927 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
928 sctp_abort_an_association(stcb->sctp_ep, stcb,
929 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
931 } else if (asoc->fragmented_delivery_inprogress) {
933 * Here we are ok with a MIDDLE or LAST
936 if (chk->rec.data.stream_number !=
937 asoc->str_of_pdapi) {
938 /* Got to be the right STR No */
939 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
940 chk->rec.data.stream_number,
942 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
943 0, M_DONTWAIT, 1, MT_DATA);
945 struct sctp_paramhdr *ph;
949 sizeof(struct sctp_paramhdr) +
950 (sizeof(uint32_t) * 3);
952 struct sctp_paramhdr *);
954 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
956 htons(SCTP_BUF_LEN(oper));
957 ippp = (uint32_t *) (ph + 1);
958 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
960 *ippp = chk->rec.data.TSN_seq;
962 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
964 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
965 sctp_abort_an_association(stcb->sctp_ep,
966 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
968 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
969 SCTP_DATA_UNORDERED &&
970 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
971 /* Got to be the right STR Seq */
972 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
973 chk->rec.data.stream_seq,
975 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
976 0, M_DONTWAIT, 1, MT_DATA);
978 struct sctp_paramhdr *ph;
982 sizeof(struct sctp_paramhdr) +
983 (3 * sizeof(uint32_t));
985 struct sctp_paramhdr *);
987 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
989 htons(SCTP_BUF_LEN(oper));
990 ippp = (uint32_t *) (ph + 1);
991 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
993 *ippp = chk->rec.data.TSN_seq;
995 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
998 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
999 sctp_abort_an_association(stcb->sctp_ep,
1000 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1007 /* Find its place */
1008 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1009 if (compare_with_wrap(at->rec.data.TSN_seq,
1010 chk->rec.data.TSN_seq, MAX_TSN)) {
1012 * one in queue is bigger than the new one, insert
1016 asoc->size_on_reasm_queue += chk->send_size;
1017 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1019 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1021 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1022 /* Gak, He sent me a duplicate str seq number */
1024 * foo bar, I guess I will just free this new guy,
1025 * should we abort too? FIX ME MAYBE? Or it COULD be
1026 * that the SSN's have wrapped. Maybe I should
1027 * compare to TSN somehow... sigh for now just blow
1031 sctp_m_freem(chk->data);
1034 sctp_free_a_chunk(stcb, chk);
1037 last_flags = at->rec.data.rcv_flags;
1038 last_tsn = at->rec.data.TSN_seq;
1040 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1042 * We are at the end, insert it after this
1045 /* check it first */
1046 asoc->size_on_reasm_queue += chk->send_size;
1047 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1048 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1053 /* Now the audits */
1055 prev_tsn = chk->rec.data.TSN_seq - 1;
1056 if (prev_tsn == prev->rec.data.TSN_seq) {
1058 * Ok the one I am dropping onto the end is the
1059 * NEXT. A bit of valdiation here.
1061 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1062 SCTP_DATA_FIRST_FRAG ||
1063 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1064 SCTP_DATA_MIDDLE_FRAG) {
1066 * Insert chk MUST be a MIDDLE or LAST
1069 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1070 SCTP_DATA_FIRST_FRAG) {
1071 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1072 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1073 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1074 0, M_DONTWAIT, 1, MT_DATA);
1076 struct sctp_paramhdr *ph;
1079 SCTP_BUF_LEN(oper) =
1080 sizeof(struct sctp_paramhdr) +
1081 (3 * sizeof(uint32_t));
1083 struct sctp_paramhdr *);
1085 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1087 htons(SCTP_BUF_LEN(oper));
1088 ippp = (uint32_t *) (ph + 1);
1089 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1091 *ippp = chk->rec.data.TSN_seq;
1093 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1096 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1097 sctp_abort_an_association(stcb->sctp_ep,
1098 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1102 if (chk->rec.data.stream_number !=
1103 prev->rec.data.stream_number) {
1105 * Huh, need the correct STR here,
1106 * they must be the same.
1108 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1109 chk->rec.data.stream_number,
1110 prev->rec.data.stream_number);
1111 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1112 0, M_DONTWAIT, 1, MT_DATA);
1114 struct sctp_paramhdr *ph;
1117 SCTP_BUF_LEN(oper) =
1118 sizeof(struct sctp_paramhdr) +
1119 (3 * sizeof(uint32_t));
1121 struct sctp_paramhdr *);
1123 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1125 htons(SCTP_BUF_LEN(oper));
1126 ippp = (uint32_t *) (ph + 1);
1127 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1129 *ippp = chk->rec.data.TSN_seq;
1131 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1133 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1134 sctp_abort_an_association(stcb->sctp_ep,
1135 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1140 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1141 chk->rec.data.stream_seq !=
1142 prev->rec.data.stream_seq) {
1144 * Huh, need the correct STR here,
1145 * they must be the same.
1147 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1148 chk->rec.data.stream_seq,
1149 prev->rec.data.stream_seq);
1150 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1151 0, M_DONTWAIT, 1, MT_DATA);
1153 struct sctp_paramhdr *ph;
1156 SCTP_BUF_LEN(oper) =
1157 sizeof(struct sctp_paramhdr) +
1158 (3 * sizeof(uint32_t));
1160 struct sctp_paramhdr *);
1162 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1164 htons(SCTP_BUF_LEN(oper));
1165 ippp = (uint32_t *) (ph + 1);
1166 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1168 *ippp = chk->rec.data.TSN_seq;
1170 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1172 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1173 sctp_abort_an_association(stcb->sctp_ep,
1174 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1179 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1180 SCTP_DATA_LAST_FRAG) {
1181 /* Insert chk MUST be a FIRST */
1182 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1183 SCTP_DATA_FIRST_FRAG) {
1184 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1185 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1186 0, M_DONTWAIT, 1, MT_DATA);
1188 struct sctp_paramhdr *ph;
1191 SCTP_BUF_LEN(oper) =
1192 sizeof(struct sctp_paramhdr) +
1193 (3 * sizeof(uint32_t));
1195 struct sctp_paramhdr *);
1197 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1199 htons(SCTP_BUF_LEN(oper));
1200 ippp = (uint32_t *) (ph + 1);
1201 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1203 *ippp = chk->rec.data.TSN_seq;
1205 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1208 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1209 sctp_abort_an_association(stcb->sctp_ep,
1210 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1219 post_tsn = chk->rec.data.TSN_seq + 1;
1220 if (post_tsn == next->rec.data.TSN_seq) {
1222 * Ok the one I am inserting ahead of is my NEXT
1223 * one. A bit of valdiation here.
1225 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1226 /* Insert chk MUST be a last fragment */
1227 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1228 != SCTP_DATA_LAST_FRAG) {
1229 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1230 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1231 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1232 0, M_DONTWAIT, 1, MT_DATA);
1234 struct sctp_paramhdr *ph;
1237 SCTP_BUF_LEN(oper) =
1238 sizeof(struct sctp_paramhdr) +
1239 (3 * sizeof(uint32_t));
1241 struct sctp_paramhdr *);
1243 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1245 htons(SCTP_BUF_LEN(oper));
1246 ippp = (uint32_t *) (ph + 1);
1247 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1249 *ippp = chk->rec.data.TSN_seq;
1251 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1253 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1254 sctp_abort_an_association(stcb->sctp_ep,
1255 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1260 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1261 SCTP_DATA_MIDDLE_FRAG ||
1262 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1263 SCTP_DATA_LAST_FRAG) {
1265 * Insert chk CAN be MIDDLE or FIRST NOT
1268 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1269 SCTP_DATA_LAST_FRAG) {
1270 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1271 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1272 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1273 0, M_DONTWAIT, 1, MT_DATA);
1275 struct sctp_paramhdr *ph;
1278 SCTP_BUF_LEN(oper) =
1279 sizeof(struct sctp_paramhdr) +
1280 (3 * sizeof(uint32_t));
1282 struct sctp_paramhdr *);
1284 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1286 htons(SCTP_BUF_LEN(oper));
1287 ippp = (uint32_t *) (ph + 1);
1288 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1290 *ippp = chk->rec.data.TSN_seq;
1292 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1295 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1296 sctp_abort_an_association(stcb->sctp_ep,
1297 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1302 if (chk->rec.data.stream_number !=
1303 next->rec.data.stream_number) {
1305 * Huh, need the correct STR here,
1306 * they must be the same.
1308 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1309 chk->rec.data.stream_number,
1310 next->rec.data.stream_number);
1311 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1312 0, M_DONTWAIT, 1, MT_DATA);
1314 struct sctp_paramhdr *ph;
1317 SCTP_BUF_LEN(oper) =
1318 sizeof(struct sctp_paramhdr) +
1319 (3 * sizeof(uint32_t));
1321 struct sctp_paramhdr *);
1323 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1325 htons(SCTP_BUF_LEN(oper));
1326 ippp = (uint32_t *) (ph + 1);
1327 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1329 *ippp = chk->rec.data.TSN_seq;
1331 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1334 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1335 sctp_abort_an_association(stcb->sctp_ep,
1336 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1341 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1342 chk->rec.data.stream_seq !=
1343 next->rec.data.stream_seq) {
1345 * Huh, need the correct STR here,
1346 * they must be the same.
1348 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1349 chk->rec.data.stream_seq,
1350 next->rec.data.stream_seq);
1351 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1352 0, M_DONTWAIT, 1, MT_DATA);
1354 struct sctp_paramhdr *ph;
1357 SCTP_BUF_LEN(oper) =
1358 sizeof(struct sctp_paramhdr) +
1359 (3 * sizeof(uint32_t));
1361 struct sctp_paramhdr *);
1363 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1365 htons(SCTP_BUF_LEN(oper));
1366 ippp = (uint32_t *) (ph + 1);
1367 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1369 *ippp = chk->rec.data.TSN_seq;
1371 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1373 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1374 sctp_abort_an_association(stcb->sctp_ep,
1375 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1383 /* Do we need to do some delivery? check */
1384 sctp_deliver_reasm_check(stcb, asoc);
1388 * This is an unfortunate routine. It checks to make sure a evil guy is not
1389 * stuffing us full of bad packet fragments. A broken peer could also do this
1390 * but this is doubtful. It is to bad I must worry about evil crackers sigh
1394 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1397 struct sctp_tmit_chunk *at;
1400 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1401 if (compare_with_wrap(TSN_seq,
1402 at->rec.data.TSN_seq, MAX_TSN)) {
1403 /* is it one bigger? */
1404 tsn_est = at->rec.data.TSN_seq + 1;
1405 if (tsn_est == TSN_seq) {
1406 /* yep. It better be a last then */
1407 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1408 SCTP_DATA_LAST_FRAG) {
1410 * Ok this guy belongs next to a guy
1411 * that is NOT last, it should be a
1412 * middle/last, not a complete
1418 * This guy is ok since its a LAST
1419 * and the new chunk is a fully
1420 * self- contained one.
1425 } else if (TSN_seq == at->rec.data.TSN_seq) {
1426 /* Software error since I have a dup? */
1430 * Ok, 'at' is larger than new chunk but does it
1431 * need to be right before it.
1433 tsn_est = TSN_seq + 1;
1434 if (tsn_est == at->rec.data.TSN_seq) {
1435 /* Yep, It better be a first */
1436 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1437 SCTP_DATA_FIRST_FRAG) {
1450 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1451 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1452 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1453 int *break_flag, int last_chunk)
1455 /* Process a data chunk */
1456 /* struct sctp_tmit_chunk *chk; */
1457 struct sctp_tmit_chunk *chk;
1461 int need_reasm_check = 0;
1462 uint16_t strmno, strmseq;
1464 struct sctp_queued_to_read *control;
1466 uint32_t protocol_id;
1467 uint8_t chunk_flags;
1468 struct sctp_stream_reset_list *liste;
1471 tsn = ntohl(ch->dp.tsn);
1472 chunk_flags = ch->ch.chunk_flags;
1473 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1474 asoc->send_sack = 1;
1476 protocol_id = ch->dp.protocol_id;
1477 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1478 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1479 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1484 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1485 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1486 asoc->cumulative_tsn == tsn) {
1487 /* It is a duplicate */
1488 SCTP_STAT_INCR(sctps_recvdupdata);
1489 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1490 /* Record a dup for the next outbound sack */
1491 asoc->dup_tsns[asoc->numduptsns] = tsn;
1494 asoc->send_sack = 1;
1497 /* Calculate the number of TSN's between the base and this TSN */
1498 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1499 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1500 /* Can't hold the bit in the mapping at max array, toss it */
1503 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1504 SCTP_TCB_LOCK_ASSERT(stcb);
1505 if (sctp_expand_mapping_array(asoc, gap)) {
1506 /* Can't expand, drop it */
1510 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1513 /* See if we have received this one already */
1514 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1515 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1516 SCTP_STAT_INCR(sctps_recvdupdata);
1517 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1518 /* Record a dup for the next outbound sack */
1519 asoc->dup_tsns[asoc->numduptsns] = tsn;
1522 asoc->send_sack = 1;
1526 * Check to see about the GONE flag, duplicates would cause a sack
1527 * to be sent up above
1529 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1530 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1531 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1534 * wait a minute, this guy is gone, there is no longer a
1535 * receiver. Send peer an ABORT!
1537 struct mbuf *op_err;
1539 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1540 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1545 * Now before going further we see if there is room. If NOT then we
1546 * MAY let one through only IF this TSN is the one we are waiting
1547 * for on a partial delivery API.
1550 /* now do the tests */
1551 if (((asoc->cnt_on_all_streams +
1552 asoc->cnt_on_reasm_queue +
1553 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1554 (((int)asoc->my_rwnd) <= 0)) {
1556 * When we have NO room in the rwnd we check to make sure
1557 * the reader is doing its job...
1559 if (stcb->sctp_socket->so_rcv.sb_cc) {
1560 /* some to read, wake-up */
1561 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1564 so = SCTP_INP_SO(stcb->sctp_ep);
1565 atomic_add_int(&stcb->asoc.refcnt, 1);
1566 SCTP_TCB_UNLOCK(stcb);
1567 SCTP_SOCKET_LOCK(so, 1);
1568 SCTP_TCB_LOCK(stcb);
1569 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1570 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1571 /* assoc was freed while we were unlocked */
1572 SCTP_SOCKET_UNLOCK(so, 1);
1576 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1577 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1578 SCTP_SOCKET_UNLOCK(so, 1);
1581 /* now is it in the mapping array of what we have accepted? */
1582 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1583 compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1584 /* Nope not in the valid range dump it */
1585 sctp_set_rwnd(stcb, asoc);
1586 if ((asoc->cnt_on_all_streams +
1587 asoc->cnt_on_reasm_queue +
1588 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1589 SCTP_STAT_INCR(sctps_datadropchklmt);
1591 SCTP_STAT_INCR(sctps_datadroprwnd);
1598 strmno = ntohs(ch->dp.stream_id);
1599 if (strmno >= asoc->streamincnt) {
1600 struct sctp_paramhdr *phdr;
1603 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1604 0, M_DONTWAIT, 1, MT_DATA);
1606 /* add some space up front so prepend will work well */
1607 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1608 phdr = mtod(mb, struct sctp_paramhdr *);
1610 * Error causes are just param's and this one has
1611 * two back to back phdr, one with the error type
1612 * and size, the other with the streamid and a rsvd
1614 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1615 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1616 phdr->param_length =
1617 htons(sizeof(struct sctp_paramhdr) * 2);
1619 /* We insert the stream in the type field */
1620 phdr->param_type = ch->dp.stream_id;
1621 /* And set the length to 0 for the rsvd field */
1622 phdr->param_length = 0;
1623 sctp_queue_op_err(stcb, mb);
1625 SCTP_STAT_INCR(sctps_badsid);
1626 SCTP_TCB_LOCK_ASSERT(stcb);
1627 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1628 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1629 asoc->highest_tsn_inside_nr_map = tsn;
1631 if (tsn == (asoc->cumulative_tsn + 1)) {
1632 /* Update cum-ack */
1633 asoc->cumulative_tsn = tsn;
1638 * Before we continue lets validate that we are not being fooled by
1639 * an evil attacker. We can only have 4k chunks based on our TSN
1640 * spread allowed by the mapping array 512 * 8 bits, so there is no
1641 * way our stream sequence numbers could have wrapped. We of course
1642 * only validate the FIRST fragment so the bit must be set.
1644 strmseq = ntohs(ch->dp.stream_sequence);
1645 #ifdef SCTP_ASOCLOG_OF_TSNS
1646 SCTP_TCB_LOCK_ASSERT(stcb);
1647 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1648 asoc->tsn_in_at = 0;
1649 asoc->tsn_in_wrapped = 1;
1651 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1652 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1653 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1654 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1655 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1656 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1657 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1658 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1661 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1662 (TAILQ_EMPTY(&asoc->resetHead)) &&
1663 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1664 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1665 strmseq, MAX_SEQ) ||
1666 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1667 /* The incoming sseq is behind where we last delivered? */
1668 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1669 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1670 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1671 0, M_DONTWAIT, 1, MT_DATA);
1673 struct sctp_paramhdr *ph;
1676 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1677 (3 * sizeof(uint32_t));
1678 ph = mtod(oper, struct sctp_paramhdr *);
1679 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1680 ph->param_length = htons(SCTP_BUF_LEN(oper));
1681 ippp = (uint32_t *) (ph + 1);
1682 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1686 *ippp = ((strmno << 16) | strmseq);
1689 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1690 sctp_abort_an_association(stcb->sctp_ep, stcb,
1691 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1695 /************************************
1696 * From here down we may find ch-> invalid
1697 * so its a good idea NOT to use it.
1698 *************************************/
1700 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1701 if (last_chunk == 0) {
1702 dmbuf = SCTP_M_COPYM(*m,
1703 (offset + sizeof(struct sctp_data_chunk)),
1704 the_len, M_DONTWAIT);
1705 #ifdef SCTP_MBUF_LOGGING
1706 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1711 if (SCTP_BUF_IS_EXTENDED(mat)) {
1712 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1714 mat = SCTP_BUF_NEXT(mat);
1719 /* We can steal the last chunk */
1723 /* lop off the top part */
1724 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1725 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1726 l_len = SCTP_BUF_LEN(dmbuf);
1729 * need to count up the size hopefully does not hit
1737 l_len += SCTP_BUF_LEN(lat);
1738 lat = SCTP_BUF_NEXT(lat);
1741 if (l_len > the_len) {
1742 /* Trim the end round bytes off too */
1743 m_adj(dmbuf, -(l_len - the_len));
1746 if (dmbuf == NULL) {
1747 SCTP_STAT_INCR(sctps_nomem);
1750 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1751 asoc->fragmented_delivery_inprogress == 0 &&
1752 TAILQ_EMPTY(&asoc->resetHead) &&
1754 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1755 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1756 /* Candidate for express delivery */
1758 * Its not fragmented, No PD-API is up, Nothing in the
1759 * delivery queue, Its un-ordered OR ordered and the next to
1760 * deliver AND nothing else is stuck on the stream queue,
1761 * And there is room for it in the socket buffer. Lets just
1762 * stuff it up the buffer....
1765 /* It would be nice to avoid this copy if we could :< */
1766 sctp_alloc_a_readq(stcb, control);
1767 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1773 if (control == NULL) {
1774 goto failed_express_del;
1776 sctp_add_to_readq(stcb->sctp_ep, stcb,
1777 control, &stcb->sctp_socket->so_rcv,
1778 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1780 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1781 /* for ordered, bump what we delivered */
1782 asoc->strmin[strmno].last_sequence_delivered++;
1784 SCTP_STAT_INCR(sctps_recvexpress);
1785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1786 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1787 SCTP_STR_LOG_FROM_EXPRS_DEL);
1791 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1792 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1793 asoc->highest_tsn_inside_nr_map = tsn;
1795 goto finish_express_del;
1798 /* If we reach here this is a new chunk */
1801 /* Express for fragmented delivery? */
1802 if ((asoc->fragmented_delivery_inprogress) &&
1803 (stcb->asoc.control_pdapi) &&
1804 (asoc->str_of_pdapi == strmno) &&
1805 (asoc->ssn_of_pdapi == strmseq)
1807 control = stcb->asoc.control_pdapi;
1808 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1809 /* Can't be another first? */
1810 goto failed_pdapi_express_del;
1812 if (tsn == (control->sinfo_tsn + 1)) {
1813 /* Yep, we can add it on */
1817 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1820 cumack = asoc->cumulative_tsn;
1821 if ((cumack + 1) == tsn)
1824 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1826 &stcb->sctp_socket->so_rcv)) {
1827 SCTP_PRINTF("Append fails end:%d\n", end);
1828 goto failed_pdapi_express_del;
1830 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1831 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1832 asoc->highest_tsn_inside_nr_map = tsn;
1834 SCTP_STAT_INCR(sctps_recvexpressm);
1835 control->sinfo_tsn = tsn;
1836 asoc->tsn_last_delivered = tsn;
1837 asoc->fragment_flags = chunk_flags;
1838 asoc->tsn_of_pdapi_last_delivered = tsn;
1839 asoc->last_flags_delivered = chunk_flags;
1840 asoc->last_strm_seq_delivered = strmseq;
1841 asoc->last_strm_no_delivered = strmno;
1843 /* clean up the flags and such */
1844 asoc->fragmented_delivery_inprogress = 0;
1845 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1846 asoc->strmin[strmno].last_sequence_delivered++;
1848 stcb->asoc.control_pdapi = NULL;
1849 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1851 * There could be another message
1854 need_reasm_check = 1;
1858 goto finish_express_del;
1861 failed_pdapi_express_del:
1863 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1864 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1865 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1866 asoc->highest_tsn_inside_nr_map = tsn;
1869 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1870 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1871 asoc->highest_tsn_inside_map = tsn;
1874 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1875 sctp_alloc_a_chunk(stcb, chk);
1877 /* No memory so we drop the chunk */
1878 SCTP_STAT_INCR(sctps_nomem);
1879 if (last_chunk == 0) {
1880 /* we copied it, free the copy */
1881 sctp_m_freem(dmbuf);
1885 chk->rec.data.TSN_seq = tsn;
1886 chk->no_fr_allowed = 0;
1887 chk->rec.data.stream_seq = strmseq;
1888 chk->rec.data.stream_number = strmno;
1889 chk->rec.data.payloadtype = protocol_id;
1890 chk->rec.data.context = stcb->asoc.context;
1891 chk->rec.data.doing_fast_retransmit = 0;
1892 chk->rec.data.rcv_flags = chunk_flags;
1894 chk->send_size = the_len;
1896 atomic_add_int(&net->ref_count, 1);
1899 sctp_alloc_a_readq(stcb, control);
1900 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1906 if (control == NULL) {
1907 /* No memory so we drop the chunk */
1908 SCTP_STAT_INCR(sctps_nomem);
1909 if (last_chunk == 0) {
1910 /* we copied it, free the copy */
1911 sctp_m_freem(dmbuf);
1915 control->length = the_len;
1918 /* Mark it as received */
1919 /* Now queue it where it belongs */
1920 if (control != NULL) {
1921 /* First a sanity check */
1922 if (asoc->fragmented_delivery_inprogress) {
1924 * Ok, we have a fragmented delivery in progress if
1925 * this chunk is next to deliver OR belongs in our
1926 * view to the reassembly, the peer is evil or
1929 uint32_t estimate_tsn;
1931 estimate_tsn = asoc->tsn_last_delivered + 1;
1932 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1933 (estimate_tsn == control->sinfo_tsn)) {
1934 /* Evil/Broke peer */
1935 sctp_m_freem(control->data);
1936 control->data = NULL;
1937 if (control->whoFrom) {
1938 sctp_free_remote_addr(control->whoFrom);
1939 control->whoFrom = NULL;
1941 sctp_free_a_readq(stcb, control);
1942 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1943 0, M_DONTWAIT, 1, MT_DATA);
1945 struct sctp_paramhdr *ph;
1948 SCTP_BUF_LEN(oper) =
1949 sizeof(struct sctp_paramhdr) +
1950 (3 * sizeof(uint32_t));
1951 ph = mtod(oper, struct sctp_paramhdr *);
1953 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1954 ph->param_length = htons(SCTP_BUF_LEN(oper));
1955 ippp = (uint32_t *) (ph + 1);
1956 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1960 *ippp = ((strmno << 16) | strmseq);
1962 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1963 sctp_abort_an_association(stcb->sctp_ep, stcb,
1964 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1969 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1970 sctp_m_freem(control->data);
1971 control->data = NULL;
1972 if (control->whoFrom) {
1973 sctp_free_remote_addr(control->whoFrom);
1974 control->whoFrom = NULL;
1976 sctp_free_a_readq(stcb, control);
1978 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1979 0, M_DONTWAIT, 1, MT_DATA);
1981 struct sctp_paramhdr *ph;
1984 SCTP_BUF_LEN(oper) =
1985 sizeof(struct sctp_paramhdr) +
1986 (3 * sizeof(uint32_t));
1988 struct sctp_paramhdr *);
1990 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1992 htons(SCTP_BUF_LEN(oper));
1993 ippp = (uint32_t *) (ph + 1);
1994 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1998 *ippp = ((strmno << 16) | strmseq);
2000 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2001 sctp_abort_an_association(stcb->sctp_ep,
2002 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2009 /* No PDAPI running */
2010 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2012 * Reassembly queue is NOT empty validate
2013 * that this tsn does not need to be in
2014 * reasembly queue. If it does then our peer
2015 * is broken or evil.
2017 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2018 sctp_m_freem(control->data);
2019 control->data = NULL;
2020 if (control->whoFrom) {
2021 sctp_free_remote_addr(control->whoFrom);
2022 control->whoFrom = NULL;
2024 sctp_free_a_readq(stcb, control);
2025 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2026 0, M_DONTWAIT, 1, MT_DATA);
2028 struct sctp_paramhdr *ph;
2031 SCTP_BUF_LEN(oper) =
2032 sizeof(struct sctp_paramhdr) +
2033 (3 * sizeof(uint32_t));
2035 struct sctp_paramhdr *);
2037 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2039 htons(SCTP_BUF_LEN(oper));
2040 ippp = (uint32_t *) (ph + 1);
2041 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2045 *ippp = ((strmno << 16) | strmseq);
2047 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2048 sctp_abort_an_association(stcb->sctp_ep,
2049 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2056 /* ok, if we reach here we have passed the sanity checks */
2057 if (chunk_flags & SCTP_DATA_UNORDERED) {
2058 /* queue directly into socket buffer */
2059 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2060 sctp_add_to_readq(stcb->sctp_ep, stcb,
2062 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2065 * Special check for when streams are resetting. We
2066 * could be more smart about this and check the
2067 * actual stream to see if it is not being reset..
2068 * that way we would not create a HOLB when amongst
2069 * streams being reset and those not being reset.
2071 * We take complete messages that have a stream reset
2072 * intervening (aka the TSN is after where our
2073 * cum-ack needs to be) off and put them on a
2074 * pending_reply_queue. The reassembly ones we do
2075 * not have to worry about since they are all sorted
2076 * and proceessed by TSN order. It is only the
2077 * singletons I must worry about.
2079 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2080 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2083 * yep its past where we need to reset... go
2084 * ahead and queue it.
2086 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2088 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2090 struct sctp_queued_to_read *ctlOn;
2091 unsigned char inserted = 0;
2093 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2095 if (compare_with_wrap(control->sinfo_tsn,
2096 ctlOn->sinfo_tsn, MAX_TSN)) {
2097 ctlOn = TAILQ_NEXT(ctlOn, next);
2100 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2105 if (inserted == 0) {
2107 * must be put at end, use
2108 * prevP (all setup from
2109 * loop) to setup nextP.
2111 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2115 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2122 /* Into the re-assembly queue */
2123 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2126 * the assoc is now gone and chk was put onto the
2127 * reasm queue, which has all been freed.
2134 if (tsn == (asoc->cumulative_tsn + 1)) {
2135 /* Update cum-ack */
2136 asoc->cumulative_tsn = tsn;
2142 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2144 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2146 SCTP_STAT_INCR(sctps_recvdata);
2147 /* Set it present please */
2148 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2149 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2151 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2152 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2153 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2155 /* check the special flag for stream resets */
2156 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2157 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2158 (asoc->cumulative_tsn == liste->tsn))
2161 * we have finished working through the backlogged TSN's now
2162 * time to reset streams. 1: call reset function. 2: free
2163 * pending_reply space 3: distribute any chunks in
2164 * pending_reply_queue.
2166 struct sctp_queued_to_read *ctl;
2168 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2169 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2170 SCTP_FREE(liste, SCTP_M_STRESET);
2171 /* sa_ignore FREED_MEMORY */
2172 liste = TAILQ_FIRST(&asoc->resetHead);
2173 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2174 if (ctl && (liste == NULL)) {
2175 /* All can be removed */
2177 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2178 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2182 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2185 /* more than one in queue */
2186 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2188 * if ctl->sinfo_tsn is <= liste->tsn we can
2189 * process it which is the NOT of
2190 * ctl->sinfo_tsn > liste->tsn
2192 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2193 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2197 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2201 * Now service re-assembly to pick up anything that has been
2202 * held on reassembly queue?
2204 sctp_deliver_reasm_check(stcb, asoc);
2205 need_reasm_check = 0;
2207 if (need_reasm_check) {
2208 /* Another one waits ? */
2209 sctp_deliver_reasm_check(stcb, asoc);
/*
 * Lookup table: sctp_map_lookup_tab[v] is the number of consecutive
 * 1-bits in the byte v counting up from the least-significant bit,
 * i.e. the bit position of the first 0 bit (0x00 -> 0, 0x01 -> 1,
 * 0x03 -> 2, 0x07 -> 3, ..., 0xFF -> 8).  It is used when sliding the
 * TSN mapping arrays: a byte of the (nr_)mapping bitmap is looked up
 * here to advance the cumulative-TSN point to the first missing TSN
 * without testing bits one at a time.
 *
 * NOTE(review): this extracted chunk has dropped lines (the embedded
 * original line numbers are discontinuous, and the closing "};" of
 * this initializer is not visible here) — verify against the upstream
 * sctp_indata.c before editing the surrounding code.
 */
2214 int8_t sctp_map_lookup_tab[256] = {
2215 0, 1, 0, 2, 0, 1, 0, 3,
2216 0, 1, 0, 2, 0, 1, 0, 4,
2217 0, 1, 0, 2, 0, 1, 0, 3,
2218 0, 1, 0, 2, 0, 1, 0, 5,
2219 0, 1, 0, 2, 0, 1, 0, 3,
2220 0, 1, 0, 2, 0, 1, 0, 4,
2221 0, 1, 0, 2, 0, 1, 0, 3,
2222 0, 1, 0, 2, 0, 1, 0, 6,
2223 0, 1, 0, 2, 0, 1, 0, 3,
2224 0, 1, 0, 2, 0, 1, 0, 4,
2225 0, 1, 0, 2, 0, 1, 0, 3,
2226 0, 1, 0, 2, 0, 1, 0, 5,
2227 0, 1, 0, 2, 0, 1, 0, 3,
2228 0, 1, 0, 2, 0, 1, 0, 4,
2229 0, 1, 0, 2, 0, 1, 0, 3,
2230 0, 1, 0, 2, 0, 1, 0, 7,
2231 0, 1, 0, 2, 0, 1, 0, 3,
2232 0, 1, 0, 2, 0, 1, 0, 4,
2233 0, 1, 0, 2, 0, 1, 0, 3,
2234 0, 1, 0, 2, 0, 1, 0, 5,
2235 0, 1, 0, 2, 0, 1, 0, 3,
2236 0, 1, 0, 2, 0, 1, 0, 4,
2237 0, 1, 0, 2, 0, 1, 0, 3,
2238 0, 1, 0, 2, 0, 1, 0, 6,
2239 0, 1, 0, 2, 0, 1, 0, 3,
2240 0, 1, 0, 2, 0, 1, 0, 4,
2241 0, 1, 0, 2, 0, 1, 0, 3,
2242 0, 1, 0, 2, 0, 1, 0, 5,
2243 0, 1, 0, 2, 0, 1, 0, 3,
2244 0, 1, 0, 2, 0, 1, 0, 4,
2245 0, 1, 0, 2, 0, 1, 0, 3,
2246 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays():
 * Advance the receiver's cumulative-TSN point by scanning the byte-wise OR
 * of nr_mapping_array and mapping_array for the first byte that is not
 * fully acked (not 0xff), then either wipe both maps (everything up to the
 * highest TSN is acked) or slide their contents down so that
 * mapping_array_base_tsn moves forward.
 *
 * NOTE(review): this extract is lossy -- braces, #ifdef/#endif pairs and
 * several declarations/statements present in the original sctp_indata.c are
 * missing between the numbered lines below; comments describe only what is
 * visible here.
 */
2251 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2254 * Now we also need to check the mapping array in a couple of ways.
2255 * 1) Did we move the cum-ack point?
2257 * When you first glance at this you might think that all entries that
2258 * make up the postion of the cum-ack would be in the nr-mapping
2259 * array only.. i.e. things up to the cum-ack are always
2260 * deliverable. Thats true with one exception, when its a fragmented
2261 * message we may not deliver the data until some threshold (or all
2262 * of it) is in place. So we must OR the nr_mapping_array and
2263 * mapping_array to get a true picture of the cum-ack.
2265 struct sctp_association *asoc;
2268 int slide_from, slide_end, lgap, distance;
2269 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot pre-slide state; used only for SCTP_MAP_LOGGING below. */
2274 old_cumack = asoc->cumulative_tsn;
2275 old_base = asoc->mapping_array_base_tsn;
2276 old_highest = asoc->highest_tsn_inside_map;
2278 * We could probably improve this a small bit by calculating the
2279 * offset of the current cum-ack as the starting point.
/*
 * Scan the combined (renegable | non-renegable) map for the first byte
 * that is not all-ones; sctp_map_lookup_tab[val] gives the count of
 * consecutive set low-order bits in that byte (presumably -- the table
 * body is above; confirm against original source).
 */
2282 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2283 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2287 /* there is a 0 bit */
2288 at += sctp_map_lookup_tab[val];
/* 'at' now counts acked TSNs from the base; rebase the cum-ack. */
2292 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: cum-ack must never pass BOTH highest-map TSNs (wrap-safe compare). */
2294 if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2295 compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2297 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2298 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2300 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2301 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2302 sctp_print_mapping_array(asoc);
2303 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2304 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery: clamp both highest-TSN trackers to the cum-ack. */
2306 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2307 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max(nr-map highest, map highest), modulo TSN wrap. */
2310 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2311 asoc->highest_tsn_inside_map,
2313 highest_tsn = asoc->highest_tsn_inside_nr_map;
2315 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything received is acked and at least one whole byte is
 * covered -- clear both arrays outright instead of sliding.
 */
2317 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2318 /* The complete array was completed by a single FR */
2319 /* highest becomes the cum-ack */
2327 /* clear the array */
2328 clr = ((at + 7) >> 3);
2329 if (clr > asoc->mapping_array_size) {
2330 clr = asoc->mapping_array_size;
2332 memset(asoc->mapping_array, 0, clr);
2333 memset(asoc->nr_mapping_array, 0, clr);
/* Debug pass: both maps must be all-zero after the clear. */
2335 for (i = 0; i < asoc->mapping_array_size; i++) {
2336 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2337 printf("Error Mapping array's not clean at clear\n");
2338 sctp_print_mapping_array(asoc);
2342 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2343 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Case 2: at least one whole byte acked but gaps remain -- slide the
 * window down by 'slide_from' bytes.
 */
2344 } else if (at >= 8) {
2345 /* we can slide the mapping array down */
2346 /* slide_from holds where we hit the first NON 0xff byte */
2349 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn from the base; slide_end = its byte. */
2352 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2353 slide_end = (lgap >> 3);
2354 if (slide_end < slide_from) {
2355 sctp_print_mapping_array(asoc);
2357 panic("impossible slide");
2359 printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2360 lgap, slide_end, slide_from, at);
2364 if (slide_end > asoc->mapping_array_size) {
2366 panic("would overrun buffer");
2368 printf("Gak, would have overrun map end:%d slide_end:%d\n",
2369 asoc->mapping_array_size, slide_end);
2370 slide_end = asoc->mapping_array_size;
2373 distance = (slide_end - slide_from) + 1;
2374 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2375 sctp_log_map(old_base, old_cumack, old_highest,
2376 SCTP_MAP_PREPARE_SLIDE);
2377 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2378 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/* If the live region would not fit after the move, skip sliding entirely. */
2380 if (distance + slide_from > asoc->mapping_array_size ||
2383 * Here we do NOT slide forward the array so that
2384 * hopefully when more data comes in to fill it up
2385 * we will be able to slide it forward. Really I
2386 * don't think this should happen :-0
2389 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2390 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2391 (uint32_t) asoc->mapping_array_size,
2392 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down to the front, then zero the vacated tail. */
2397 for (ii = 0; ii < distance; ii++) {
2398 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2399 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2402 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2403 asoc->mapping_array[ii] = 0;
2404 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep an empty map's highest-TSN marker glued to base-1 as the base
 * advances by slide_from bytes (8 TSNs per byte).
 */
2406 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2407 asoc->highest_tsn_inside_map += (slide_from << 3);
2409 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2410 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2412 asoc->mapping_array_base_tsn += (slide_from << 3);
2413 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2414 sctp_log_map(asoc->mapping_array_base_tsn,
2415 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2416 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check():
 * Decide whether to send a SACK immediately or (re)start the delayed-ack
 * timer after processing inbound DATA.  Special-cases SHUTDOWN-SENT (send
 * SHUTDOWN + SACK right away) and the CMT DAC algorithm (delay acks that
 * would otherwise be forced out by gap reports).
 *
 * NOTE(review): lossy extract -- several lines (braces, comment
 * terminators, the 'is_a_gap' declaration) are missing from this view.
 */
2424 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2426 struct sctp_association *asoc;
2427 uint32_t highest_tsn;
/* highest_tsn = max of the two per-map highest trackers (wrap-safe). */
2430 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2431 asoc->highest_tsn_inside_map,
2433 highest_tsn = asoc->highest_tsn_inside_nr_map;
2435 highest_tsn = asoc->highest_tsn_inside_map;
2439 * Now we need to see if we need to queue a sack or just start the
2440 * timer (if allowed).
2442 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2444 * Ok special case, in SHUTDOWN-SENT case. here we maker
2445 * sure SACK timer is off and instead send a SHUTDOWN and a
/* Stop any pending delayed-ack timer before the immediate send. */
2448 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2449 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2450 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2452 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2453 sctp_send_sack(stcb);
2457 /* is there a gap now ? */
2458 is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2461 * CMT DAC algorithm: increase number of packets received
2464 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Conditions that force a SACK now rather than waiting on the timer. */
2466 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2468 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2470 (stcb->asoc.numduptsns) || /* we have dup's */
2471 (is_a_gap) || /* is still a gap */
2472 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2473 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/* CMT + DAC: defer the ack instead, when no timer is pending and no
 * stronger reason (send_sack, dups, delayed_ack off) applies. */
2476 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2477 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2478 (stcb->asoc.send_sack == 0) &&
2479 (stcb->asoc.numduptsns == 0) &&
2480 (stcb->asoc.delayed_ack) &&
2481 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2484 * CMT DAC algorithm: With CMT, delay acks
2485 * even in the face of
2487 * reordering. Therefore, if acks that do not
2488 * have to be sent because of the above
2489 * reasons, will be delayed. That is, acks
2490 * that would have been sent due to gap
2491 * reports will be delayed with DAC. Start
2492 * the delayed ack timer.
2494 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2495 stcb->sctp_ep, stcb, NULL);
2498 * Ok we must build a SACK since the timer
2499 * is pending, we got our first packet OR
2500 * there are gaps or duplicates.
2502 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2503 sctp_send_sack(stcb);
/* Otherwise just arm the delayed-ack timer if it is not already running. */
2506 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2507 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2508 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues():
 * Drive delivery from the reassembly queue.  If a partial-delivery (PD-API)
 * session is already in progress, continue it; otherwise look at the head
 * of reasmqueue and, if a deliverable first fragment is present (in order,
 * or unordered), start a new fragmented delivery once enough data is queued
 * (whole message, or at least pd_point bytes).
 *
 * NOTE(review): lossy extract -- the tail of this function (after original
 * line 2568) and several intermediate lines are not visible here.
 */
2515 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2517 struct sctp_tmit_chunk *chk;
2518 uint32_t tsize, pd_point;
/* Continue an in-flight partial delivery first. */
2521 if (asoc->fragmented_delivery_inprogress) {
2522 sctp_service_reassembly(stcb, asoc);
2524 /* Can we proceed further, i.e. the PD-API is complete */
2525 if (asoc->fragmented_delivery_inprogress) {
2530 * Now is there some other chunk I can deliver from the reassembly
2534 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the reassembly accounting. */
2536 asoc->size_on_reasm_queue = 0;
2537 asoc->cnt_on_reasm_queue = 0;
/* Next expected SSN on the head chunk's stream. */
2540 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2541 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2542 ((nxt_todel == chk->rec.data.stream_seq) ||
2543 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2545 * Yep the first one is here. We setup to start reception,
2546 * by backing down the TSN just in case we can't deliver.
2550 * Before we start though either all of the message should
2551 * be here or the socket buffer max or nothing on the
2552 * delivery queue and something can be delivered.
/* pd_point: partial-delivery threshold, capped by the socket rcv limit. */
2554 if (stcb->sctp_socket) {
2555 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2556 stcb->sctp_ep->partial_delivery_point);
2558 pd_point = stcb->sctp_ep->partial_delivery_point;
2560 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Record the PD-API session parameters from the first fragment. */
2561 asoc->fragmented_delivery_inprogress = 1;
2562 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2563 asoc->str_of_pdapi = chk->rec.data.stream_number;
2564 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2565 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2566 asoc->fragment_flags = chk->rec.data.rcv_flags;
2567 sctp_service_reassembly(stcb, asoc);
2568 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data():
 * Walk all chunks in an inbound packet starting at *offset, handing each
 * DATA chunk to sctp_process_a_data_chunk() and applying the chunk-type
 * bit rules (0x40: report unrecognized, 0x80: skip vs. stop) to anything
 * else found in the DATA region.  Afterwards: report rwnd-overrun drops,
 * reset the error count, service the reassembly queue, and queue/schedule
 * a SACK via sctp_sack_check().
 *
 * NOTE(review): lossy extract -- declarations (m, to, from, ippp, merr,
 * stop_proc), braces, #endif lines and some statements are missing from
 * this view; comments describe only what is visible.
 */
2576 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2577 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2578 struct sctp_nets *net, uint32_t * high_tsn)
2580 struct sctp_data_chunk *ch, chunk_buf;
2581 struct sctp_association *asoc;
2582 int num_chunks = 0; /* number of control chunks processed */
2584 int chk_length, break_flag, last_chunk;
2585 int abort_flag = 0, was_a_gap = 0;
2589 sctp_set_rwnd(stcb, &stcb->asoc);
2592 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap existed BEFORE this packet (for sctp_sack_check). */
2594 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2595 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2596 /* there was a gap before this data was processed */
2600 * setup where we got the last DATA packet from for any SACK that
2601 * may need to go out. Don't bump the net. This is done ONLY when a
2602 * chunk is assigned.
2604 asoc->last_data_chunk_from = net;
2607 * Now before we proceed we must figure out if this is a wasted
2608 * cluster... i.e. it is a small packet sent in and yet the driver
2609 * underneath allocated a full cluster for it. If so we must copy it
2610 * to a smaller mbuf and free up the cluster mbuf. This will help
2611 * with cluster starvation. Note for __Panda__ we don't do this
2612 * since it has clusters all the way down to 64 bytes.
2614 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2615 /* we only handle mbufs that are singletons.. not chains */
2616 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2618 /* ok lets see if we can copy the data up */
2621 /* get the pointers and copy */
2622 to = mtod(m, caddr_t *);
2623 from = mtod((*mm), caddr_t *);
2624 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2625 /* copy the length and free up the old */
2626 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2628 /* sucess, back copy */
2631 /* We are in trouble in the mbuf world .. yikes */
2635 /* get pointer to the first chunk header */
2636 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2637 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2642 * process all DATA chunks...
2644 *high_tsn = asoc->cumulative_tsn;
2646 asoc->data_pkts_seen++;
/* Main chunk-walking loop; stop_proc is set when the packet is exhausted
 * or a terminal condition (abort, stop-processing chunk) is hit. */
2647 while (stop_proc == 0) {
2648 /* validate chunk length */
2649 chk_length = ntohs(ch->ch.chunk_length);
2650 if (length - *offset < chk_length) {
2651 /* all done, mutulated chunk */
2655 if (ch->ch.chunk_type == SCTP_DATA) {
/* DATA must carry at least one payload byte beyond the header. */
2656 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2658 * Need to send an abort since we had a
2659 * invalid data chunk.
2661 struct mbuf *op_err;
/* Build a PROTOCOL_VIOLATION error cause: paramhdr + 2 x uint32 payload. */
2663 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2664 0, M_DONTWAIT, 1, MT_DATA);
2667 struct sctp_paramhdr *ph;
2670 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2671 (2 * sizeof(uint32_t));
2672 ph = mtod(op_err, struct sctp_paramhdr *);
2674 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2675 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2676 ippp = (uint32_t *) (ph + 1);
2677 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2679 *ippp = asoc->cumulative_tsn;
2682 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2683 sctp_abort_association(inp, stcb, m, iphlen, sh,
2684 op_err, 0, net->port);
2687 #ifdef SCTP_AUDITING_ENABLED
2688 sctp_audit_log(0xB1, 0);
/* Flag whether this DATA chunk is the last chunk in the packet. */
2690 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2695 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2696 chk_length, net, high_tsn, &abort_flag, &break_flag,
2705 * Set because of out of rwnd space and no
2706 * drop rep space left.
2712 /* not a data chunk in the data region */
2713 switch (ch->ch.chunk_type) {
2714 case SCTP_INITIATION:
2715 case SCTP_INITIATION_ACK:
2716 case SCTP_SELECTIVE_ACK:
2717 case SCTP_NR_SELECTIVE_ACK: /* EY */
2718 case SCTP_HEARTBEAT_REQUEST:
2719 case SCTP_HEARTBEAT_ACK:
2720 case SCTP_ABORT_ASSOCIATION:
2722 case SCTP_SHUTDOWN_ACK:
2723 case SCTP_OPERATION_ERROR:
2724 case SCTP_COOKIE_ECHO:
2725 case SCTP_COOKIE_ACK:
2728 case SCTP_SHUTDOWN_COMPLETE:
2729 case SCTP_AUTHENTICATION:
2730 case SCTP_ASCONF_ACK:
2731 case SCTP_PACKET_DROPPED:
2732 case SCTP_STREAM_RESET:
2733 case SCTP_FORWARD_CUM_TSN:
2736 * Now, what do we do with KNOWN chunks that
2737 * are NOT in the right place?
2739 * For now, I do nothing but ignore them. We
2740 * may later want to add sysctl stuff to
2741 * switch out and do either an ABORT() or
2742 * possibly process them.
2744 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2745 struct mbuf *op_err;
2747 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2748 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2753 /* unknown chunk type, use bit rules */
/* High bit 0x40: report the unrecognized chunk back to the peer. */
2754 if (ch->ch.chunk_type & 0x40) {
2755 /* Add a error report to the queue */
2757 struct sctp_paramhdr *phd;
2759 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2761 phd = mtod(merr, struct sctp_paramhdr *);
2763 * We cheat and use param
2764 * type since we did not
2765 * bother to define a error
2766 * cause struct. They are
2767 * the same basic format
2768 * with different names.
2771 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2773 htons(chk_length + sizeof(*phd));
2774 SCTP_BUF_LEN(merr) = sizeof(*phd);
/* Append a copy of the offending chunk to the error report. */
2775 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2776 SCTP_SIZE32(chk_length),
2778 if (SCTP_BUF_NEXT(merr)) {
2779 sctp_queue_op_err(stcb, merr);
/* High bit 0x80 clear: stop processing the rest of this packet. */
2785 if ((ch->ch.chunk_type & 0x80) == 0) {
2786 /* discard the rest of this packet */
2788 } /* else skip this bad chunk and
2791 }; /* switch of chunk type */
/* Advance to the next 32-bit-aligned chunk boundary. */
2793 *offset += SCTP_SIZE32(chk_length);
2794 if ((*offset >= length) || stop_proc) {
2795 /* no more data left in the mbuf chain */
2799 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2800 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2810 * we need to report rwnd overrun drops.
2812 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2816 * Did we get data, if so update the time for auto-close and
2817 * give peer credit for being alive.
2819 SCTP_STAT_INCR(sctps_recvpktwithdata);
2820 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2821 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2822 stcb->asoc.overall_error_count,
2824 SCTP_FROM_SCTP_INDATA,
/* Receiving valid data clears the association's error counter. */
2827 stcb->asoc.overall_error_count = 0;
2828 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2830 /* now service all of the reassm queue if needed */
2831 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2832 sctp_service_queues(stcb, asoc);
2834 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2835 /* Assure that we ack right away */
2836 stcb->asoc.send_sack = 1;
2838 /* Start a sack timer or QUEUE a SACK for sending */
2839 sctp_sack_check(stcb, was_a_gap, &abort_flag);
/*
 * sctp_process_segment_range():
 * Mark the sent-queue chunks covered by one SACK gap-ack block
 * [last_tsn+frag_strt .. last_tsn+frag_end].  For each newly-acked chunk:
 * update CMT CUCv2 (rtx-)pseudo-cumack trackers, SFR/HTNA newack state,
 * flight-size/net_ack accounting, RTO measurement, and the ECN nonce sum;
 * then mark the chunk SCTP_DATAGRAM_MARKED (or free it under nr-sack).
 * Returns wake_him, used only by the nr-sack path (see line 3065).
 *
 * NOTE(review): lossy extract -- loop/brace structure, the inner
 * while (tp1) loop header, and several statements are missing from this
 * view; exact statement ordering matters here, so nothing was reflowed.
 */
2847 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2848 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2850 uint32_t * biggest_newly_acked_tsn,
2851 uint32_t * this_sack_lowest_newack,
2854 struct sctp_tmit_chunk *tp1;
2855 unsigned int theTSN;
2856 int j, wake_him = 0, circled = 0;
2858 /* Recover the tp1 we last saw */
2861 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Walk every TSN inside this gap-ack block. */
2863 for (j = frag_strt; j <= frag_end; j++) {
2864 theTSN = j + last_tsn;
2866 if (tp1->rec.data.doing_fast_retransmit)
2870 * CMT: CUCv2 algorithm. For each TSN being
2871 * processed from the sent queue, track the
2872 * next expected pseudo-cumack, or
2873 * rtx_pseudo_cumack, if required. Separate
2874 * cumack trackers for first transmissions,
2875 * and retransmissions.
/* First transmission (snd_count == 1) feeds pseudo_cumack ... */
2877 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2878 (tp1->snd_count == 1)) {
2879 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2880 tp1->whoTo->find_pseudo_cumack = 0;
/* ... retransmissions (snd_count > 1) feed rtx_pseudo_cumack. */
2882 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2883 (tp1->snd_count > 1)) {
2884 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2885 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk for this TSN in the sent queue. */
2887 if (tp1->rec.data.TSN_seq == theTSN) {
2888 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2890 * must be held until
2894 * ECN Nonce: Add the nonce
2895 * value to the sender's
/* Newly acked (was below RESEND): take it out of flight. */
2898 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2900 * If it is less than RESEND, it is
2901 * now no-longer in flight.
2902 * Higher values may already be set
2903 * via previous Gap Ack Blocks...
2904 * i.e. ACKED or RESEND.
2906 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2907 *biggest_newly_acked_tsn, MAX_TSN)) {
2908 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2911 * CMT: SFR algo (and HTNA) - set
2912 * saw_newack to 1 for dest being
2913 * newly acked. update
2914 * this_sack_highest_newack if
2917 if (tp1->rec.data.chunk_was_revoked == 0)
2918 tp1->whoTo->saw_newack = 1;
2920 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2921 tp1->whoTo->this_sack_highest_newack,
2923 tp1->whoTo->this_sack_highest_newack =
2924 tp1->rec.data.TSN_seq;
2927 * CMT DAC algo: also update
2928 * this_sack_lowest_newack
2930 if (*this_sack_lowest_newack == 0) {
2931 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2932 sctp_log_sack(*this_sack_lowest_newack,
2934 tp1->rec.data.TSN_seq,
2937 SCTP_LOG_TSN_ACKED);
2939 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2942 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2943 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2944 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2945 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2946 * Separate pseudo_cumack trackers for first transmissions and
2949 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
/* Revoked chunks do not advance the pseudo-cumack. */
2950 if (tp1->rec.data.chunk_was_revoked == 0) {
2951 tp1->whoTo->new_pseudo_cumack = 1;
2953 tp1->whoTo->find_pseudo_cumack = 1;
2955 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2956 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2958 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2959 if (tp1->rec.data.chunk_was_revoked == 0) {
2960 tp1->whoTo->new_pseudo_cumack = 1;
2962 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2964 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2965 sctp_log_sack(*biggest_newly_acked_tsn,
2967 tp1->rec.data.TSN_seq,
2970 SCTP_LOG_TSN_ACKED);
2972 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2973 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2974 tp1->whoTo->flight_size,
2976 (uintptr_t) tp1->whoTo,
2977 tp1->rec.data.TSN_seq);
/* Remove the chunk from per-net and total flight accounting. */
2979 sctp_flight_size_decrease(tp1);
2980 sctp_total_flight_decrease(stcb, tp1);
2982 tp1->whoTo->net_ack += tp1->send_size;
2983 if (tp1->snd_count < 2) {
2985 * True non-retransmited chunk
2987 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample -- presumably only taken for non-retransmitted chunks
 * (Karn's rule); the guarding condition is on a missing line. */
2994 sctp_calculate_rto(stcb,
2997 &tp1->sent_rcv_time,
2998 sctp_align_safe_nocopy);
/* Fold the chunk's ECT nonce into the running SACK nonce sum. */
3003 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3004 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3005 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3006 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3007 stcb->asoc.this_sack_highest_gap,
3009 stcb->asoc.this_sack_highest_gap =
3010 tp1->rec.data.TSN_seq;
/* Chunk was queued for retransmit but got acked: drop retran count. */
3012 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3013 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3014 #ifdef SCTP_AUDITING_ENABLED
3015 sctp_audit_log(0xB2,
3016 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3021 * All chunks NOT UNSENT fall through here and are marked
3022 * (leave PR-SCTP ones that are to skip alone though)
3024 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3025 tp1->sent = SCTP_DATAGRAM_MARKED;
3027 if (tp1->rec.data.chunk_was_revoked) {
3028 /* deflate the cwnd */
3029 tp1->whoTo->cwnd -= tp1->book_size;
3030 tp1->rec.data.chunk_was_revoked = 0;
3032 /* NR Sack code here */
/* nr-sack: release the chunk's data now that it is non-renegable. */
3039 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3040 sctp_m_freem(tp1->data);
3047 } /* if (tp1->TSN_seq == theTSN) */
/* Passed theTSN without a match: stop scanning for this TSN. */
3048 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3052 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* Hit the queue end once: wrap to the head (fragments may be unordered). */
3053 if ((tp1 == NULL) && (circled == 0)) {
3055 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3057 } /* end while (tp1) */
3060 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3062 /* In case the fragments were not in order we must reset */
3063 } /* end for (j = fragStart */
3065 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments():
 * Iterate over all (nr-)gap-ack blocks in a SACK/NR-SACK chunk, pull each
 * block out of the mbuf chain, sanity-check it, and hand it to
 * sctp_process_segment_range() for sent-queue marking.  Tracks
 * *biggest_tsn_acked across blocks and rewinds the sent-queue cursor when
 * blocks arrive out of order.  Returns chunk_freed (set by the nr-sack
 * path; declaration is on a missing line).
 *
 * NOTE(review): lossy extract -- declarations (i, num_frs, non_revocable,
 * chunk_freed), braces and some statements are not visible here.
 */
3070 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3071 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3072 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3073 int num_seg, int num_nr_seg, int *ecn_seg_sums)
3075 struct sctp_gap_ack_block *frag, block;
3076 struct sctp_tmit_chunk *tp1;
3081 uint16_t frag_strt, frag_end;
3082 uint32_t last_frag_high;
/* Regular gap blocks come first, then nr-gap blocks. */
3088 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3089 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3090 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3091 *offset += sizeof(block);
/* Truncated chunk: bail out with whatever was processed so far. */
3093 return (chunk_freed);
3095 frag_strt = ntohs(frag->start);
3096 frag_end = ntohs(frag->end);
3097 /* some sanity checks on the fragment offsets */
3098 if (frag_strt > frag_end) {
3099 /* this one is malformed, skip */
/* Offsets are relative to last_tsn (the cum-ack in the SACK). */
3102 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3104 *biggest_tsn_acked = frag_end + last_tsn;
3106 /* mark acked dgs and find out the highestTSN being acked */
3108 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3109 /* save the locations of the last frags */
3110 last_frag_high = frag_end + last_tsn;
3113 * now lets see if we need to reset the queue due to
3114 * a out-of-order SACK fragment
3116 if (compare_with_wrap(frag_strt + last_tsn,
3117 last_frag_high, MAX_TSN)) {
3119 * if the new frag starts after the last TSN
3120 * frag covered, we are ok and this one is
3121 * beyond the last one
3126 * ok, they have reset us, so we need to
3127 * reset the queue this will cause extra
3128 * hunting but hey, they chose the
3129 * performance hit when they failed to order
/* Out-of-order block: restart the scan from the queue head. */
3132 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3134 last_frag_high = frag_end + last_tsn;
3144 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3145 non_revocable, &num_frs, biggest_newly_acked_tsn,
3146 this_sack_lowest_newack, ecn_seg_sums)) {
3150 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3152 sctp_log_fr(*biggest_tsn_acked,
3153 *biggest_newly_acked_tsn,
3154 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3156 return (chunk_freed);
/*
 * sctp_check_for_revoked():
 * After processing a SACK, walk the sent queue between cumack and
 * biggest_tsn_acked.  A chunk still marked ACKED (acked by an earlier SACK
 * but not re-acked by this one) has been "revoked" by the peer: demote it
 * to SENT, put it back into flight accounting, and inflate cwnd to match.
 * A chunk marked MARKED (re-acked in this SACK) is promoted to ACKED.
 * If anything was revoked, re-arm the ECN nonce re-sync state.
 *
 * NOTE(review): lossy extract -- loop header, braces and a few statements
 * (e.g. where tot_revoked is incremented) are not visible here.
 */
3160 sctp_check_for_revoked(struct sctp_tcb *stcb,
3161 struct sctp_association *asoc, uint32_t cumack,
3162 uint32_t biggest_tsn_acked)
3164 struct sctp_tmit_chunk *tp1;
3165 int tot_revoked = 0;
3167 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only chunks above the cum-ack can be revoked. */
3169 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3172 * ok this guy is either ACK or MARKED. If it is
3173 * ACKED it has been previously acked but not this
3174 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Beyond the highest TSN this SACK covered: nothing more to check. */
3177 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3182 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3183 /* it has been revoked */
3184 tp1->sent = SCTP_DATAGRAM_SENT;
3185 tp1->rec.data.chunk_was_revoked = 1;
3187 * We must add this stuff back in to assure
3188 * timers and such get started.
3190 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3191 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3192 tp1->whoTo->flight_size,
3194 (uintptr_t) tp1->whoTo,
3195 tp1->rec.data.TSN_seq);
/* Revoked data counts as in flight again. */
3197 sctp_flight_size_increase(tp1);
3198 sctp_total_flight_increase(stcb, tp1);
3200 * We inflate the cwnd to compensate for our
3201 * artificial inflation of the flight_size.
3203 tp1->whoTo->cwnd += tp1->book_size;
3205 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3206 sctp_log_sack(asoc->last_acked_seq,
3208 tp1->rec.data.TSN_seq,
3211 SCTP_LOG_TSN_REVOKED);
3213 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3214 /* it has been re-acked in this SACK */
3215 tp1->sent = SCTP_DATAGRAM_ACKED;
/* UNSENT chunks end the scan (queue is ordered; nothing past them sent). */
3218 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3220 tp1 = TAILQ_NEXT(tp1, sctp_next);
3222 if (tot_revoked > 0) {
3224 * Setup the ecn nonce re-sync point. We do this since once
3225 * data is revoked we begin to retransmit things, which do
3226 * NOT have the ECN bits set. This means we are now out of
3227 * sync and must wait until we get back in sync with the
3228 * peer to check ECN bits.
/* Re-sync at the next unsent TSN, or at sending_seq if queue is empty. */
3230 tp1 = TAILQ_FIRST(&asoc->send_queue);
3232 asoc->nonce_resync_tsn = asoc->sending_seq;
3234 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3236 asoc->nonce_wait_for_ecne = 0;
3237 asoc->nonce_sum_check = 0;
3243 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3244 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3246 struct sctp_tmit_chunk *tp1;
3247 int strike_flag = 0;
3249 int tot_retrans = 0;
3250 uint32_t sending_seq;
3251 struct sctp_nets *net;
3252 int num_dests_sacked = 0;
3255 * select the sending_seq, this is either the next thing ready to be
3256 * sent but not transmitted, OR, the next seq we assign.
3258 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3260 sending_seq = asoc->sending_seq;
3262 sending_seq = tp1->rec.data.TSN_seq;
3265 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3266 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3267 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3268 if (net->saw_newack)
3272 if (stcb->asoc.peer_supports_prsctp) {
3273 (void)SCTP_GETTIME_TIMEVAL(&now);
3275 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3278 if (tp1->no_fr_allowed) {
3279 /* this one had a timeout or something */
3280 tp1 = TAILQ_NEXT(tp1, sctp_next);
3283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3284 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3285 sctp_log_fr(biggest_tsn_newly_acked,
3286 tp1->rec.data.TSN_seq,
3288 SCTP_FR_LOG_CHECK_STRIKE);
3290 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3292 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3296 if (stcb->asoc.peer_supports_prsctp) {
3297 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3298 /* Is it expired? */
3301 * TODO sctp_constants.h needs alternative
3302 * time macros when _KERNEL is undefined.
3304 (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3306 /* Yes so drop it */
3307 if (tp1->data != NULL) {
3308 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3309 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3310 SCTP_SO_NOT_LOCKED);
3312 tp1 = TAILQ_NEXT(tp1, sctp_next);
3317 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3318 asoc->this_sack_highest_gap, MAX_TSN)) {
3319 /* we are beyond the tsn in the sack */
3322 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3323 /* either a RESEND, ACKED, or MARKED */
3325 tp1 = TAILQ_NEXT(tp1, sctp_next);
3329 * CMT : SFR algo (covers part of DAC and HTNA as well)
3331 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3333 * No new acks were receieved for data sent to this
3334 * dest. Therefore, according to the SFR algo for
3335 * CMT, no data sent to this dest can be marked for
3336 * FR using this SACK.
3338 tp1 = TAILQ_NEXT(tp1, sctp_next);
3340 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3341 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3343 * CMT: New acks were receieved for data sent to
3344 * this dest. But no new acks were seen for data
3345 * sent after tp1. Therefore, according to the SFR
3346 * algo for CMT, tp1 cannot be marked for FR using
3347 * this SACK. This step covers part of the DAC algo
3348 * and the HTNA algo as well.
3350 tp1 = TAILQ_NEXT(tp1, sctp_next);
3354 * Here we check to see if we were have already done a FR
3355 * and if so we see if the biggest TSN we saw in the sack is
3356 * smaller than the recovery point. If so we don't strike
3357 * the tsn... otherwise we CAN strike the TSN.
3360 * @@@ JRI: Check for CMT if (accum_moved &&
3361 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3364 if (accum_moved && asoc->fast_retran_loss_recovery) {
3366 * Strike the TSN if in fast-recovery and cum-ack
3369 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3370 sctp_log_fr(biggest_tsn_newly_acked,
3371 tp1->rec.data.TSN_seq,
3373 SCTP_FR_LOG_STRIKE_CHUNK);
3375 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3378 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3380 * CMT DAC algorithm: If SACK flag is set to
3381 * 0, then lowest_newack test will not pass
3382 * because it would have been set to the
3383 * cumack earlier. If not already to be
3384 * rtx'd, If not a mixed sack and if tp1 is
3385 * not between two sacked TSNs, then mark by
3386 * one more. NOTE that we are marking by one
3387 * additional time since the SACK DAC flag
3388 * indicates that two packets have been
3389 * received after this missing TSN.
3391 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3392 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3394 sctp_log_fr(16 + num_dests_sacked,
3395 tp1->rec.data.TSN_seq,
3397 SCTP_FR_LOG_STRIKE_CHUNK);
3402 } else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3404 * For those that have done a FR we must take
3405 * special consideration if we strike. I.e the
3406 * biggest_newly_acked must be higher than the
3407 * sending_seq at the time we did the FR.
3410 #ifdef SCTP_FR_TO_ALTERNATE
3412 * If FR's go to new networks, then we must only do
3413 * this for singly homed asoc's. However if the FR's
3414 * go to the same network (Armando's work) then its
3415 * ok to FR multiple times.
3423 if ((compare_with_wrap(biggest_tsn_newly_acked,
3424 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3425 (biggest_tsn_newly_acked ==
3426 tp1->rec.data.fast_retran_tsn)) {
3428 * Strike the TSN, since this ack is
3429 * beyond where things were when we
3432 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3433 sctp_log_fr(biggest_tsn_newly_acked,
3434 tp1->rec.data.TSN_seq,
3436 SCTP_FR_LOG_STRIKE_CHUNK);
3438 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3442 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3444 * CMT DAC algorithm: If
3445 * SACK flag is set to 0,
3446 * then lowest_newack test
3447 * will not pass because it
3448 * would have been set to
3449 * the cumack earlier. If
3450 * not already to be rtx'd,
3451 * If not a mixed sack and
3452 * if tp1 is not between two
3453 * sacked TSNs, then mark by
3454 * one more. NOTE that we
3455 * are marking by one
3456 * additional time since the
3457 * SACK DAC flag indicates
3458 * that two packets have
3459 * been received after this
3462 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3463 (num_dests_sacked == 1) &&
3464 compare_with_wrap(this_sack_lowest_newack,
3465 tp1->rec.data.TSN_seq, MAX_TSN)) {
3466 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3467 sctp_log_fr(32 + num_dests_sacked,
3468 tp1->rec.data.TSN_seq,
3470 SCTP_FR_LOG_STRIKE_CHUNK);
3472 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3480 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3483 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3484 biggest_tsn_newly_acked, MAX_TSN)) {
3486 * We don't strike these: This is the HTNA
3487 * algorithm i.e. we don't strike If our TSN is
3488 * larger than the Highest TSN Newly Acked.
3492 /* Strike the TSN */
3493 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3494 sctp_log_fr(biggest_tsn_newly_acked,
3495 tp1->rec.data.TSN_seq,
3497 SCTP_FR_LOG_STRIKE_CHUNK);
3499 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3502 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3504 * CMT DAC algorithm: If SACK flag is set to
3505 * 0, then lowest_newack test will not pass
3506 * because it would have been set to the
3507 * cumack earlier. If not already to be
3508 * rtx'd, If not a mixed sack and if tp1 is
3509 * not between two sacked TSNs, then mark by
3510 * one more. NOTE that we are marking by one
3511 * additional time since the SACK DAC flag
3512 * indicates that two packets have been
3513 * received after this missing TSN.
3515 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3516 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3517 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3518 sctp_log_fr(48 + num_dests_sacked,
3519 tp1->rec.data.TSN_seq,
3521 SCTP_FR_LOG_STRIKE_CHUNK);
3527 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3528 struct sctp_nets *alt;
3530 /* fix counts and things */
3531 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3532 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3533 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3535 (uintptr_t) tp1->whoTo,
3536 tp1->rec.data.TSN_seq);
3539 tp1->whoTo->net_ack++;
3540 sctp_flight_size_decrease(tp1);
3542 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3543 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3544 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3546 /* add back to the rwnd */
3547 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3549 /* remove from the total flight */
3550 sctp_total_flight_decrease(stcb, tp1);
3552 if ((stcb->asoc.peer_supports_prsctp) &&
3553 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3555 * Has it been retransmitted tv_sec times? -
3556 * we store the retran count there.
3558 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3559 /* Yes, so drop it */
3560 if (tp1->data != NULL) {
3561 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3562 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3563 SCTP_SO_NOT_LOCKED);
3565 /* Make sure to flag we had a FR */
3566 tp1->whoTo->net_ack++;
3567 tp1 = TAILQ_NEXT(tp1, sctp_next);
3571 /* printf("OK, we are now ready to FR this guy\n"); */
3572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3573 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3577 /* This is a subsequent FR */
3578 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3580 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3581 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3583 * CMT: Using RTX_SSTHRESH policy for CMT.
3584 * If CMT is being used, then pick dest with
3585 * largest ssthresh for any retransmission.
3587 tp1->no_fr_allowed = 1;
3589 /* sa_ignore NO_NULL_CHK */
3590 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3592 * JRS 5/18/07 - If CMT PF is on,
3593 * use the PF version of
3596 alt = sctp_find_alternate_net(stcb, alt, 2);
3599 * JRS 5/18/07 - If only CMT is on,
3600 * use the CMT version of
3603 /* sa_ignore NO_NULL_CHK */
3604 alt = sctp_find_alternate_net(stcb, alt, 1);
3610 * CUCv2: If a different dest is picked for
3611 * the retransmission, then new
3612 * (rtx-)pseudo_cumack needs to be tracked
3613 * for orig dest. Let CUCv2 track new (rtx-)
3614 * pseudo-cumack always.
3617 tp1->whoTo->find_pseudo_cumack = 1;
3618 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3620 } else {/* CMT is OFF */
3622 #ifdef SCTP_FR_TO_ALTERNATE
3623 /* Can we find an alternate? */
3624 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3627 * default behavior is to NOT retransmit
3628 * FR's to an alternate. Armando Caro's
3629 * paper details why.
3635 tp1->rec.data.doing_fast_retransmit = 1;
3637 /* mark the sending seq for possible subsequent FR's */
3639 * printf("Marking TSN for FR new value %x\n",
3640 * (uint32_t)tpi->rec.data.TSN_seq);
3642 if (TAILQ_EMPTY(&asoc->send_queue)) {
3644 * If the queue of send is empty then its
3645 * the next sequence number that will be
3646 * assigned so we subtract one from this to
3647 * get the one we last sent.
3649 tp1->rec.data.fast_retran_tsn = sending_seq;
3652 * If there are chunks on the send queue
3653 * (unsent data that has made it from the
3654 * stream queues but not out the door, we
3655 * take the first one (which will have the
3656 * lowest TSN) and subtract one to get the
3659 struct sctp_tmit_chunk *ttt;
3661 ttt = TAILQ_FIRST(&asoc->send_queue);
3662 tp1->rec.data.fast_retran_tsn =
3663 ttt->rec.data.TSN_seq;
3668 * this guy had a RTO calculation pending on
3673 if (alt != tp1->whoTo) {
3674 /* yes, there is an alternate. */
3675 sctp_free_remote_addr(tp1->whoTo);
3676 /* sa_ignore FREED_MEMORY */
3678 atomic_add_int(&alt->ref_count, 1);
3681 tp1 = TAILQ_NEXT(tp1, sctp_next);
3684 if (tot_retrans > 0) {
3686 * Setup the ecn nonce re-sync point. We do this since once
3687 * we go to FR something we introduce a Karn's rule scenario
3688 * and won't know the totals for the ECN bits.
3690 asoc->nonce_resync_tsn = sending_seq;
3691 asoc->nonce_wait_for_ecne = 0;
3692 asoc->nonce_sum_check = 0;
/*
 * sctp_try_advance_peer_ack_point() - PR-SCTP: walk the front of
 * asoc->sent_queue and try to move asoc->advanced_peer_ack_point
 * forward past chunks marked SCTP_FORWARD_TSN_SKIP or
 * SCTP_DATAGRAM_ACKED, abandoning TTL-expired PR-SCTP resends on the
 * way.  Returns a chunk pointer (a_adv) -- presumably the last chunk
 * the ack point advanced over, for the caller to build a FORWARD-TSN
 * from; TODO confirm against the full source.
 *
 * NOTE(review): this excerpt is a sampled copy of sctp_indata.c.  The
 * leading numbers on each line are baked-in original line numbers, and
 * many original lines (braces, the loop header, return statements,
 * comment delimiters) are missing, so the text below is not
 * compilable as shown.
 */
3696 struct sctp_tmit_chunk *
3697 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3698 struct sctp_association *asoc)
3700 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* If the peer never negotiated PR-SCTP there is nothing to advance. */
3704 if (asoc->peer_supports_prsctp == 0) {
3707 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/*
 * A chunk in any state other than SKIP/ACKED/RESEND is still plain
 * in-flight data; the ack point cannot pass it.
 */
3709 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3710 tp1->sent != SCTP_DATAGRAM_ACKED &&
3711 tp1->sent != SCTP_DATAGRAM_RESEND) {
3712 /* no chance to advance, out of here */
3715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3716 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3717 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3718 asoc->advanced_peer_ack_point,
3719 tp1->rec.data.TSN_seq, 0, 0);
3722 if (!PR_SCTP_ENABLED(tp1->flags)) {
3724 * We can't fwd-tsn past any that are reliable aka
3725 * retransmitted until the asoc fails.
3730 (void)SCTP_GETTIME_TIMEVAL(&now);
3733 tp2 = TAILQ_NEXT(tp1, sctp_next);
3735 * now we got a chunk which is marked for another
3736 * retransmission to a PR-stream but has run out its chances
3737 * already maybe OR has been marked to skip now. Can we skip
3738 * it if its a resend?
3740 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3741 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3743 * Now is this one marked for resend and its time is
/* TTL-policy resend whose lifetime expired: abandon it. */
3746 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3747 /* Yes so drop it */
3749 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3750 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3751 SCTP_SO_NOT_LOCKED);
3755 * No, we are done when hit one for resend
3756 * whos time as not expired.
3762 * Ok now if this chunk is marked to drop it we can clean up
3763 * the chunk, advance our peer ack point and we can check
3766 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3767 (tp1->sent == SCTP_DATAGRAM_ACKED)) {
3768 /* advance PeerAckPoint goes forward */
3769 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3770 asoc->advanced_peer_ack_point,
3773 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3775 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3776 /* No update but we do save the chk */
3781 * If it is still in RESEND we can advance no
3787 * If we hit here we just dumped tp1, move to next tsn on
/*
 * sctp_fs_audit() - debug audit of the flight-size bookkeeping: counts
 * sent_queue chunks by their ->sent state (inflight, resend,
 * in-between, acked, above-acked) and reports a mismatch via
 * panic()/printf()/SCTP_PRINTF().  Return value is presumably non-zero
 * when the audit fails so the caller can resynchronize the counters --
 * TODO confirm; the function's return-type line and return statements
 * are among the lines missing from this sampled excerpt, which is not
 * compilable as shown.
 */
3796 sctp_fs_audit(struct sctp_association *asoc)
3798 struct sctp_tmit_chunk *chk;
3799 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
/* Snapshot the counters on entry so the report shows pre-audit values. */
3800 int entry_flight, entry_cnt, ret;
3802 entry_flight = asoc->total_flight;
3803 entry_cnt = asoc->total_flight_count;
3806 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Classify every chunk on the sent queue by its marking state. */
3809 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3810 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3811 printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3812 chk->rec.data.TSN_seq,
3817 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3819 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3821 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as inflight/in-between here is a mismatch. */
3828 if ((inflight > 0) || (inbetween > 0)) {
3830 panic("Flight size-express incorrect? \n");
3832 printf("asoc->total_flight:%d cnt:%d\n",
3833 entry_flight, entry_cnt);
3835 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3836 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery() - when the peer's rwnd reopens, take a
 * chunk that was sent as a zero-window probe and return it to normal
 * sending: clear its window_probe flag, deduct it from the per-net and
 * total flight sizes, and mark it SCTP_DATAGRAM_RESEND (bumping the
 * retransmit count).  Chunks already ACKED (or with no data) are
 * skipped -- only the flight log entry is emitted for those.
 *
 * NOTE(review): sampled excerpt of sctp_indata.c; the leading numbers
 * are baked-in original line numbers and several original lines
 * (braces, a return, log arguments) are missing, so this text is not
 * compilable as shown.
 */
3845 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3846 struct sctp_association *asoc,
3847 struct sctp_nets *net,
3848 struct sctp_tmit_chunk *tp1)
3850 tp1->window_probe = 0;
3851 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3852 /* TSN's skipped we do NOT move back. */
3853 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3854 tp1->whoTo->flight_size,
3856 (uintptr_t) tp1->whoTo,
3857 tp1->rec.data.TSN_seq);
3860 /* First setup this by shrinking flight */
3861 sctp_flight_size_decrease(tp1);
3862 sctp_total_flight_decrease(stcb, tp1);
3863 /* Now mark for resend */
3864 tp1->sent = SCTP_DATAGRAM_RESEND;
3865 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3867 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3868 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3869 tp1->whoTo->flight_size,
3871 (uintptr_t) tp1->whoTo,
3872 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack() - fast-path SACK processing for the common
 * case of a pure cumulative ack (no gap-ack blocks).  Visible work, in
 * order: sanity-check the cum-ack against sending_seq (aborting the
 * association on an impossible ack when strict_sacks is set), free every
 * sent-queue chunk covered by the new cum-ack while updating flight
 * sizes / net_ack / RTO, wake the sending socket, run the CC module's
 * cwnd update, perform ECN-nonce accounting, recompute the peer rwnd
 * (with SWS avoidance), recover window-probe chunks, restart or stop
 * per-net T3-rxt and early-FR timers, handle shutdown transitions when
 * both queues drain, and finally run the PR-SCTP
 * advanced-peer-ack-point / FORWARD-TSN procedure.
 *
 * NOTE(review): this is a sampled excerpt of sctp_indata.c.  The
 * leading numbers are baked-in original line numbers, and a large
 * number of original lines (braces, else-arms, declarations such as
 * old_rwnd/send_s/oper/so, comment delimiters, the function's return
 * paths) are missing, so the text below is not compilable as shown.
 */
3877 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3878 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3880 struct sctp_nets *net;
3881 struct sctp_association *asoc;
3882 struct sctp_tmit_chunk *tp1, *tp2;
3884 int win_probe_recovery = 0;
3885 int win_probe_recovered = 0;
3886 int j, done_once = 0;
3888 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3889 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3890 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
/* Caller must already hold the TCB lock. */
3892 SCTP_TCB_LOCK_ASSERT(stcb);
3893 #ifdef SCTP_ASOCLOG_OF_TSNS
3894 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3895 stcb->asoc.cumack_log_at++;
3896 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3897 stcb->asoc.cumack_log_at = 0;
3901 old_rwnd = asoc->peers_rwnd;
/* Old (behind last_acked_seq) ack: nothing to do.  Equal: pure window update. */
3902 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3905 } else if (asoc->last_acked_seq == cumack) {
3906 /* Window update sack */
3907 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3908 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3909 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3910 /* SWS sender side engages */
3911 asoc->peers_rwnd = 0;
3913 if (asoc->peers_rwnd > old_rwnd) {
3918 /* First setup for CC stuff */
3919 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3920 net->prev_cwnd = net->cwnd;
3925 * CMT: Reset CUC and Fast recovery algo variables before
3928 net->new_pseudo_cumack = 0;
3929 net->will_exit_fast_recovery = 0;
/*
 * strict_sacks: a cum-ack at or beyond the next TSN to be sent is a
 * protocol violation -- build an operational-error cause and abort.
 */
3931 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3934 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3935 tp1 = TAILQ_LAST(&asoc->sent_queue,
3936 sctpchunk_listhead);
3937 send_s = tp1->rec.data.TSN_seq + 1;
3939 send_s = asoc->sending_seq;
3941 if ((cumack == send_s) ||
3942 compare_with_wrap(cumack, send_s, MAX_TSN)) {
3948 panic("Impossible sack 1");
3953 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3954 0, M_DONTWAIT, 1, MT_DATA);
3956 struct sctp_paramhdr *ph;
3959 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3961 ph = mtod(oper, struct sctp_paramhdr *);
3962 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3963 ph->param_length = htons(SCTP_BUF_LEN(oper));
3964 ippp = (uint32_t *) (ph + 1);
3965 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3967 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3968 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3973 asoc->this_sack_highest_gap = cumack;
3974 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3975 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3976 stcb->asoc.overall_error_count,
3978 SCTP_FROM_SCTP_INDATA,
/* A valid SACK clears the association's error counter. */
3981 stcb->asoc.overall_error_count = 0;
3982 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3983 /* process the new consecutive TSN first */
3984 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3986 tp2 = TAILQ_NEXT(tp1, sctp_next);
3987 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3989 cumack == tp1->rec.data.TSN_seq) {
3990 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3991 printf("Warning, an unsent is now acked?\n");
3994 * ECN Nonce: Add the nonce to the sender's
3997 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3998 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4000 * If it is less than ACKED, it is
4001 * now no-longer in flight. Higher
4002 * values may occur during marking
4004 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4005 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4006 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4007 tp1->whoTo->flight_size,
4009 (uintptr_t) tp1->whoTo,
4010 tp1->rec.data.TSN_seq);
4012 sctp_flight_size_decrease(tp1);
4013 /* sa_ignore NO_NULL_CHK */
4014 sctp_total_flight_decrease(stcb, tp1);
4016 tp1->whoTo->net_ack += tp1->send_size;
/* snd_count < 2 means never retransmitted: safe for RTO measurement. */
4017 if (tp1->snd_count < 2) {
4019 * True non-retransmited
4022 tp1->whoTo->net_ack2 +=
4025 /* update RTO too? */
4032 sctp_calculate_rto(stcb,
4034 &tp1->sent_rcv_time,
4035 sctp_align_safe_nocopy);
4040 * CMT: CUCv2 algorithm. From the
4041 * cumack'd TSNs, for each TSN being
4042 * acked for the first time, set the
4043 * following variables for the
4044 * corresp destination.
4045 * new_pseudo_cumack will trigger a
4047 * find_(rtx_)pseudo_cumack will
4048 * trigger search for the next
4049 * expected (rtx-)pseudo-cumack.
4051 tp1->whoTo->new_pseudo_cumack = 1;
4052 tp1->whoTo->find_pseudo_cumack = 1;
4053 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4055 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4056 /* sa_ignore NO_NULL_CHK */
4057 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4060 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4061 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4063 if (tp1->rec.data.chunk_was_revoked) {
4064 /* deflate the cwnd */
4065 tp1->whoTo->cwnd -= tp1->book_size;
4066 tp1->rec.data.chunk_was_revoked = 0;
/* Chunk is fully acked: unlink it and release its buffers. */
4068 tp1->sent = SCTP_DATAGRAM_ACKED;
4069 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4071 /* sa_ignore NO_NULL_CHK */
4072 sctp_free_bufspace(stcb, asoc, tp1, 1);
4073 sctp_m_freem(tp1->data);
4075 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4076 sctp_log_sack(asoc->last_acked_seq,
4078 tp1->rec.data.TSN_seq,
4081 SCTP_LOG_FREE_SENT);
4084 asoc->sent_queue_cnt--;
4085 sctp_free_a_chunk(stcb, tp1);
4093 /* sa_ignore NO_NULL_CHK */
4094 if (stcb->sctp_socket) {
4095 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4099 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4100 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4101 /* sa_ignore NO_NULL_CHK */
4102 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
/*
 * Apple/lock-testing builds must drop the TCB lock before taking the
 * socket lock (lock ordering); the refcnt bump keeps the TCB alive
 * across the unlocked window, and the CLOSED_SOCKET re-check below
 * catches an association freed meanwhile.
 */
4104 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4105 so = SCTP_INP_SO(stcb->sctp_ep);
4106 atomic_add_int(&stcb->asoc.refcnt, 1);
4107 SCTP_TCB_UNLOCK(stcb);
4108 SCTP_SOCKET_LOCK(so, 1);
4109 SCTP_TCB_LOCK(stcb);
4110 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4111 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4112 /* assoc was freed while we were unlocked */
4113 SCTP_SOCKET_UNLOCK(so, 1);
4117 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4118 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4119 SCTP_SOCKET_UNLOCK(so, 1);
4122 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4123 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4127 /* JRS - Use the congestion control given in the CC module */
4128 if (asoc->last_acked_seq != cumack)
4129 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4131 asoc->last_acked_seq = cumack;
4133 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4134 /* nothing left in-flight */
4135 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4136 net->flight_size = 0;
4137 net->partial_bytes_acked = 0;
4139 asoc->total_flight = 0;
4140 asoc->total_flight_count = 0;
4142 /* ECN Nonce updates */
4143 if (asoc->ecn_nonce_allowed) {
4144 if (asoc->nonce_sum_check) {
/* Nonce mismatch: wait one window for a possible ECNE before judging. */
4145 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4146 if (asoc->nonce_wait_for_ecne == 0) {
4147 struct sctp_tmit_chunk *lchk;
4149 lchk = TAILQ_FIRST(&asoc->send_queue);
4150 asoc->nonce_wait_for_ecne = 1;
4152 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4154 asoc->nonce_wait_tsn = asoc->sending_seq;
4157 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4158 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4160 * Misbehaving peer. We need
4161 * to react to this guy
4163 asoc->ecn_allowed = 0;
4164 asoc->ecn_nonce_allowed = 0;
4169 /* See if Resynchronization Possible */
4170 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4171 asoc->nonce_sum_check = 1;
4173 * Now we must calculate what the base is.
4174 * We do this based on two things, we know
4175 * the total's for all the segments
4176 * gap-acked in the SACK (none). We also
4177 * know the SACK's nonce sum, its in
4178 * nonce_sum_flag. So we can build a truth
4179 * table to back-calculate the new value of
4180 * asoc->nonce_sum_expect_base:
4182 * SACK-flag-Value Seg-Sums Base 0 0 0
4186 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute peer rwnd from the advertised window minus outstanding data. */
4191 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4192 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4193 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4194 /* SWS sender side engages */
4195 asoc->peers_rwnd = 0;
4197 if (asoc->peers_rwnd > old_rwnd) {
4198 win_probe_recovery = 1;
4200 /* Now assure a timer where data is queued at */
4203 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4206 if (win_probe_recovery && (net->window_probe)) {
4207 win_probe_recovered = 1;
4209 * Find first chunk that was used with window probe
4210 * and clear the sent
4212 /* sa_ignore FREED_MEMORY */
4213 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4214 if (tp1->window_probe) {
4215 /* move back to data send queue */
4216 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4221 if (net->RTO == 0) {
4222 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4224 to_ticks = MSEC_TO_TICKS(net->RTO);
/* Data still in flight on this net: (re)arm its T3-rxt timer. */
4226 if (net->flight_size) {
4228 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4229 sctp_timeout_handler, &net->rxt_timer);
4230 if (net->window_probe) {
4231 net->window_probe = 0;
4234 if (net->window_probe) {
4236 * In window probes we must assure a timer
4237 * is still running there
4239 net->window_probe = 0;
4240 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4241 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4242 sctp_timeout_handler, &net->rxt_timer);
4244 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4245 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4247 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4249 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4250 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4251 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4252 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4253 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * Inconsistent accounting (sent queue non-empty but no retrans pending,
 * no window-probe recovery): audit and, if broken, rebuild the flight
 * counters from the queue contents.
 */
4259 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4260 (asoc->sent_queue_retran_cnt == 0) &&
4261 (win_probe_recovered == 0) &&
4264 * huh, this should not happen unless all packets are
4265 * PR-SCTP and marked to skip of course.
4267 if (sctp_fs_audit(asoc)) {
4268 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4269 net->flight_size = 0;
4271 asoc->total_flight = 0;
4272 asoc->total_flight_count = 0;
4273 asoc->sent_queue_retran_cnt = 0;
4274 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4275 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4276 sctp_flight_size_increase(tp1);
4277 sctp_total_flight_increase(stcb, tp1);
4278 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4279 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4286 /**********************************/
4287 /* Now what about shutdown issues */
4288 /**********************************/
4289 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4290 /* nothing left on sendqueue.. consider done */
4292 if ((asoc->stream_queue_cnt == 1) &&
4293 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4294 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4295 (asoc->locked_on_sending)
4297 struct sctp_stream_queue_pending *sp;
4300 * I may be in a state where we got all across.. but
4301 * cannot write more due to a shutdown... we abort
4302 * since the user did not indicate EOR in this case.
4303 * The sp will be cleaned during free of the asoc.
4305 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4307 if ((sp) && (sp->length == 0)) {
4308 /* Let cleanup code purge it */
4309 if (sp->msg_is_complete) {
4310 asoc->stream_queue_cnt--;
4312 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4313 asoc->locked_on_sending = NULL;
4314 asoc->stream_queue_cnt--;
4318 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4319 (asoc->stream_queue_cnt == 0)) {
4320 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4321 /* Need to abort here */
4327 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4328 0, M_DONTWAIT, 1, MT_DATA);
4330 struct sctp_paramhdr *ph;
4333 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4335 ph = mtod(oper, struct sctp_paramhdr *);
4336 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4337 ph->param_length = htons(SCTP_BUF_LEN(oper));
4338 ippp = (uint32_t *) (ph + 1);
4339 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4341 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4342 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
/* All data acked and user closed: move to SHUTDOWN-SENT. */
4344 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4345 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4346 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4348 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4349 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4350 sctp_stop_timers_for_shutdown(stcb);
4351 sctp_send_shutdown(stcb,
4352 stcb->asoc.primary_destination);
4353 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4354 stcb->sctp_ep, stcb, asoc->primary_destination);
4355 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4356 stcb->sctp_ep, stcb, asoc->primary_destination);
/* Peer initiated shutdown and we drained: answer with SHUTDOWN-ACK. */
4358 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4359 (asoc->stream_queue_cnt == 0)) {
4360 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4363 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4364 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4365 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4366 sctp_send_shutdown_ack(stcb,
4367 stcb->asoc.primary_destination);
4369 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4370 stcb->sctp_ep, stcb, asoc->primary_destination);
4373 /*********************************************/
4374 /* Here we perform PR-SCTP procedures */
4376 /*********************************************/
4377 /* C1. update advancedPeerAckPoint */
4378 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4379 asoc->advanced_peer_ack_point = cumack;
4381 /* PR-Sctp issues need to be addressed too */
4382 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4383 struct sctp_tmit_chunk *lchk;
4384 uint32_t old_adv_peer_ack_point;
4386 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4387 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4388 /* C3. See if we need to send a Fwd-TSN */
4389 if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4392 * ISSUE with ECN, see FWD-TSN processing for notes
4393 * on issues that will occur when the ECN NONCE
4394 * stuff is put into SCTP for cross checking.
4396 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4398 send_forward_tsn(stcb, asoc);
4400 * ECN Nonce: Disable Nonce Sum check when
4401 * FWD TSN is sent and store resync tsn
4403 asoc->nonce_sum_check = 0;
4404 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4406 /* try to FR fwd-tsn's that get lost too */
4407 lchk->rec.data.fwd_tsn_cnt++;
4408 if (lchk->rec.data.fwd_tsn_cnt > 3) {
4409 send_forward_tsn(stcb, asoc);
4410 lchk->rec.data.fwd_tsn_cnt = 0;
4415 /* Assure a timer is up */
4416 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4417 stcb->sctp_ep, stcb, lchk->whoTo);
4420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4421 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4423 stcb->asoc.peers_rwnd,
4424 stcb->asoc.total_flight,
4425 stcb->asoc.total_output_queue_size);
4430 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4431 struct sctp_tcb *stcb, struct sctp_nets *net_from,
4432 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4433 int *abort_now, uint8_t flags,
4434 uint32_t cum_ack, uint32_t rwnd)
4436 struct sctp_association *asoc;
4437 struct sctp_tmit_chunk *tp1, *tp2;
4438 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4439 uint32_t sav_cum_ack;
4440 uint16_t wake_him = 0;
4441 uint32_t send_s = 0;
4443 int accum_moved = 0;
4444 int will_exit_fast_recovery = 0;
4445 uint32_t a_rwnd, old_rwnd;
4446 int win_probe_recovery = 0;
4447 int win_probe_recovered = 0;
4448 struct sctp_nets *net = NULL;
4449 int nonce_sum_flag, ecn_seg_sums = 0;
4451 uint8_t reneged_all = 0;
4452 uint8_t cmt_dac_flag;
4455 * we take any chance we can to service our queues since we cannot
4456 * get awoken when the socket is read from :<
4459 * Now perform the actual SACK handling: 1) Verify that it is not an
4460 * old sack, if so discard. 2) If there is nothing left in the send
4461 * queue (cum-ack is equal to last acked) then you have a duplicate
4462 * too, update any rwnd change and verify no timers are running.
4463 * then return. 3) Process any new consequtive data i.e. cum-ack
4464 * moved process these first and note that it moved. 4) Process any
4465 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4466 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4467 * sync up flightsizes and things, stop all timers and also check
4468 * for shutdown_pending state. If so then go ahead and send off the
4469 * shutdown. If in shutdown recv, send off the shutdown-ack and
4470 * start that timer, Ret. 9) Strike any non-acked things and do FR
4471 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4472 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4473 * if in shutdown_recv state.
4475 SCTP_TCB_LOCK_ASSERT(stcb);
4477 this_sack_lowest_newack = 0;
4479 SCTP_STAT_INCR(sctps_slowpath_sack);
4481 nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4482 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4483 #ifdef SCTP_ASOCLOG_OF_TSNS
4484 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4485 stcb->asoc.cumack_log_at++;
4486 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4487 stcb->asoc.cumack_log_at = 0;
4492 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4493 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4494 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4496 old_rwnd = stcb->asoc.peers_rwnd;
4497 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4498 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4499 stcb->asoc.overall_error_count,
4501 SCTP_FROM_SCTP_INDATA,
4504 stcb->asoc.overall_error_count = 0;
4506 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4507 sctp_log_sack(asoc->last_acked_seq,
4514 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4516 uint32_t *dupdata, dblock;
4518 for (i = 0; i < num_dup; i++) {
4519 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4520 sizeof(uint32_t), (uint8_t *) & dblock);
4521 if (dupdata == NULL) {
4524 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4527 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4529 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4530 tp1 = TAILQ_LAST(&asoc->sent_queue,
4531 sctpchunk_listhead);
4532 send_s = tp1->rec.data.TSN_seq + 1;
4535 send_s = asoc->sending_seq;
4537 if (cum_ack == send_s ||
4538 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4542 * no way, we have not even sent this TSN out yet.
4543 * Peer is hopelessly messed up with us.
4545 printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4548 printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4549 tp1->rec.data.TSN_seq, tp1);
4554 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4555 0, M_DONTWAIT, 1, MT_DATA);
4557 struct sctp_paramhdr *ph;
4560 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4562 ph = mtod(oper, struct sctp_paramhdr *);
4563 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4564 ph->param_length = htons(SCTP_BUF_LEN(oper));
4565 ippp = (uint32_t *) (ph + 1);
4566 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4568 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4569 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4573 /**********************/
4574 /* 1) check the range */
4575 /**********************/
4576 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4577 /* acking something behind */
4580 sav_cum_ack = asoc->last_acked_seq;
4582 /* update the Rwnd of the peer */
4583 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4584 TAILQ_EMPTY(&asoc->send_queue) &&
4585 (asoc->stream_queue_cnt == 0)) {
4586 /* nothing left on send/sent and strmq */
4587 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4588 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4589 asoc->peers_rwnd, 0, 0, a_rwnd);
4591 asoc->peers_rwnd = a_rwnd;
4592 if (asoc->sent_queue_retran_cnt) {
4593 asoc->sent_queue_retran_cnt = 0;
4595 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4596 /* SWS sender side engages */
4597 asoc->peers_rwnd = 0;
4599 /* stop any timers */
4600 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4601 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4602 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4603 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4604 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4605 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4606 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4607 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4610 net->partial_bytes_acked = 0;
4611 net->flight_size = 0;
4613 asoc->total_flight = 0;
4614 asoc->total_flight_count = 0;
4618 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4619 * things. The total byte count acked is tracked in netAckSz AND
4620 * netAck2 is used to track the total bytes acked that are un-
4621 * amibguious and were never retransmitted. We track these on a per
4622 * destination address basis.
4624 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4625 net->prev_cwnd = net->cwnd;
4630 * CMT: Reset CUC and Fast recovery algo variables before
4633 net->new_pseudo_cumack = 0;
4634 net->will_exit_fast_recovery = 0;
4636 /* process the new consecutive TSN first */
4637 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4639 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4641 last_tsn == tp1->rec.data.TSN_seq) {
4642 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4644 * ECN Nonce: Add the nonce to the sender's
4647 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4649 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4651 * If it is less than ACKED, it is
4652 * now no-longer in flight. Higher
4653 * values may occur during marking
4655 if ((tp1->whoTo->dest_state &
4656 SCTP_ADDR_UNCONFIRMED) &&
4657 (tp1->snd_count < 2)) {
4659 * If there was no retran
4660 * and the address is
4661 * un-confirmed and we sent
4663 * sacked.. its confirmed,
4666 tp1->whoTo->dest_state &=
4667 ~SCTP_ADDR_UNCONFIRMED;
4669 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4670 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4671 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4672 tp1->whoTo->flight_size,
4674 (uintptr_t) tp1->whoTo,
4675 tp1->rec.data.TSN_seq);
4677 sctp_flight_size_decrease(tp1);
4678 sctp_total_flight_decrease(stcb, tp1);
4680 tp1->whoTo->net_ack += tp1->send_size;
4682 /* CMT SFR and DAC algos */
4683 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4684 tp1->whoTo->saw_newack = 1;
4686 if (tp1->snd_count < 2) {
4688 * True non-retransmited
4691 tp1->whoTo->net_ack2 +=
4694 /* update RTO too? */
4697 sctp_calculate_rto(stcb,
4699 &tp1->sent_rcv_time,
4700 sctp_align_safe_nocopy);
4705 * CMT: CUCv2 algorithm. From the
4706 * cumack'd TSNs, for each TSN being
4707 * acked for the first time, set the
4708 * following variables for the
4709 * corresp destination.
4710 * new_pseudo_cumack will trigger a
4712 * find_(rtx_)pseudo_cumack will
4713 * trigger search for the next
4714 * expected (rtx-)pseudo-cumack.
4716 tp1->whoTo->new_pseudo_cumack = 1;
4717 tp1->whoTo->find_pseudo_cumack = 1;
4718 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4721 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4722 sctp_log_sack(asoc->last_acked_seq,
4724 tp1->rec.data.TSN_seq,
4727 SCTP_LOG_TSN_ACKED);
4729 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4730 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4733 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4734 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4735 #ifdef SCTP_AUDITING_ENABLED
4736 sctp_audit_log(0xB3,
4737 (asoc->sent_queue_retran_cnt & 0x000000ff));
4740 if (tp1->rec.data.chunk_was_revoked) {
4741 /* deflate the cwnd */
4742 tp1->whoTo->cwnd -= tp1->book_size;
4743 tp1->rec.data.chunk_was_revoked = 0;
4745 tp1->sent = SCTP_DATAGRAM_ACKED;
4750 tp1 = TAILQ_NEXT(tp1, sctp_next);
4752 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4753 /* always set this up to cum-ack */
4754 asoc->this_sack_highest_gap = last_tsn;
4756 if ((num_seg > 0) || (num_nr_seg > 0)) {
4759 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4760 * to be greater than the cumack. Also reset saw_newack to 0
4763 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4764 net->saw_newack = 0;
4765 net->this_sack_highest_newack = last_tsn;
4769 * thisSackHighestGap will increase while handling NEW
4770 * segments this_sack_highest_newack will increase while
4771 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4772 * used for CMT DAC algo. saw_newack will also change.
4774 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4775 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4776 num_seg, num_nr_seg, &ecn_seg_sums)) {
4779 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4781 * validate the biggest_tsn_acked in the gap acks if
4782 * strict adherence is wanted.
4784 if ((biggest_tsn_acked == send_s) ||
4785 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4787 * peer is either confused or we are under
4788 * attack. We must abort.
4790 printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4798 /*******************************************/
4799 /* cancel ALL T3-send timer if accum moved */
4800 /*******************************************/
4801 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
4802 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4803 if (net->new_pseudo_cumack)
4804 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4806 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4811 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4812 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4813 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4817 /********************************************/
4818 /* drop the acked chunks from the sendqueue */
4819 /********************************************/
4820 asoc->last_acked_seq = cum_ack;
4822 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4826 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4830 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4831 /* no more sent on list */
4832 printf("Warning, tp1->sent == %d and its now acked?\n",
4835 tp2 = TAILQ_NEXT(tp1, sctp_next);
4836 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4837 if (tp1->pr_sctp_on) {
4838 if (asoc->pr_sctp_cnt != 0)
4839 asoc->pr_sctp_cnt--;
4841 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4842 (asoc->total_flight > 0)) {
4844 panic("Warning flight size is postive and should be 0");
4846 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4847 asoc->total_flight);
4849 asoc->total_flight = 0;
4852 /* sa_ignore NO_NULL_CHK */
4853 sctp_free_bufspace(stcb, asoc, tp1, 1);
4854 sctp_m_freem(tp1->data);
4855 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4856 asoc->sent_queue_cnt_removeable--;
4859 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4860 sctp_log_sack(asoc->last_acked_seq,
4862 tp1->rec.data.TSN_seq,
4865 SCTP_LOG_FREE_SENT);
4868 asoc->sent_queue_cnt--;
4869 sctp_free_a_chunk(stcb, tp1);
4872 } while (tp1 != NULL);
4875 /* sa_ignore NO_NULL_CHK */
4876 if ((wake_him) && (stcb->sctp_socket)) {
4877 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4881 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4882 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4883 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4885 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4886 so = SCTP_INP_SO(stcb->sctp_ep);
4887 atomic_add_int(&stcb->asoc.refcnt, 1);
4888 SCTP_TCB_UNLOCK(stcb);
4889 SCTP_SOCKET_LOCK(so, 1);
4890 SCTP_TCB_LOCK(stcb);
4891 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4892 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4893 /* assoc was freed while we were unlocked */
4894 SCTP_SOCKET_UNLOCK(so, 1);
4898 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4899 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4900 SCTP_SOCKET_UNLOCK(so, 1);
4903 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4904 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4908 if (asoc->fast_retran_loss_recovery && accum_moved) {
4909 if (compare_with_wrap(asoc->last_acked_seq,
4910 asoc->fast_recovery_tsn, MAX_TSN) ||
4911 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4912 /* Setup so we will exit RFC2582 fast recovery */
4913 will_exit_fast_recovery = 1;
4917 * Check for revoked fragments:
4919 * if Previous sack - Had no frags then we can't have any revoked if
4920 * Previous sack - Had frag's then - If we now have frags aka
4921 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4922 * some of them. else - The peer revoked all ACKED fragments, since
4923 * we had some before and now we have NONE.
4927 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4928 else if (asoc->saw_sack_with_frags) {
4929 int cnt_revoked = 0;
4931 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4933 /* Peer revoked all dg's marked or acked */
4934 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4935 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4936 tp1->sent = SCTP_DATAGRAM_SENT;
4937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4938 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4939 tp1->whoTo->flight_size,
4941 (uintptr_t) tp1->whoTo,
4942 tp1->rec.data.TSN_seq);
4944 sctp_flight_size_increase(tp1);
4945 sctp_total_flight_increase(stcb, tp1);
4946 tp1->rec.data.chunk_was_revoked = 1;
4948 * To ensure that this increase in
4949 * flightsize, which is artificial,
4950 * does not throttle the sender, we
4951 * also increase the cwnd
4954 tp1->whoTo->cwnd += tp1->book_size;
4962 asoc->saw_sack_with_frags = 0;
4964 if (num_seg || num_nr_seg)
4965 asoc->saw_sack_with_frags = 1;
4967 asoc->saw_sack_with_frags = 0;
4969 /* JRS - Use the congestion control given in the CC module */
4970 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4972 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4973 /* nothing left in-flight */
4974 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4975 /* stop all timers */
4976 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4977 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4978 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4979 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4980 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4983 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4984 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4985 net->flight_size = 0;
4986 net->partial_bytes_acked = 0;
4988 asoc->total_flight = 0;
4989 asoc->total_flight_count = 0;
4991 /**********************************/
4992 /* Now what about shutdown issues */
4993 /**********************************/
4994 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4995 /* nothing left on sendqueue.. consider done */
4996 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4997 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4998 asoc->peers_rwnd, 0, 0, a_rwnd);
5000 asoc->peers_rwnd = a_rwnd;
5001 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5002 /* SWS sender side engages */
5003 asoc->peers_rwnd = 0;
5006 if ((asoc->stream_queue_cnt == 1) &&
5007 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5008 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5009 (asoc->locked_on_sending)
5011 struct sctp_stream_queue_pending *sp;
5014 * I may be in a state where we got all across.. but
5015 * cannot write more due to a shutdown... we abort
5016 * since the user did not indicate EOR in this case.
5018 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5020 if ((sp) && (sp->length == 0)) {
5021 asoc->locked_on_sending = NULL;
5022 if (sp->msg_is_complete) {
5023 asoc->stream_queue_cnt--;
5025 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5026 asoc->stream_queue_cnt--;
5030 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5031 (asoc->stream_queue_cnt == 0)) {
5032 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5033 /* Need to abort here */
5039 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5040 0, M_DONTWAIT, 1, MT_DATA);
5042 struct sctp_paramhdr *ph;
5045 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5047 ph = mtod(oper, struct sctp_paramhdr *);
5048 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5049 ph->param_length = htons(SCTP_BUF_LEN(oper));
5050 ippp = (uint32_t *) (ph + 1);
5051 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5053 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5054 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5057 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5058 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5059 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5061 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5062 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5063 sctp_stop_timers_for_shutdown(stcb);
5064 sctp_send_shutdown(stcb,
5065 stcb->asoc.primary_destination);
5066 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5067 stcb->sctp_ep, stcb, asoc->primary_destination);
5068 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5069 stcb->sctp_ep, stcb, asoc->primary_destination);
5072 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5073 (asoc->stream_queue_cnt == 0)) {
5074 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5077 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5078 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5079 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5080 sctp_send_shutdown_ack(stcb,
5081 stcb->asoc.primary_destination);
5083 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5084 stcb->sctp_ep, stcb, asoc->primary_destination);
5089 * Now here we are going to recycle net_ack for a different use...
5092 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5097 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5098 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5099 * automatically ensure that.
5101 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5102 this_sack_lowest_newack = cum_ack;
5104 if ((num_seg > 0) || (num_nr_seg > 0)) {
5105 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5106 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5108 /* JRS - Use the congestion control given in the CC module */
5109 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5111 /******************************************************************
5112 * Here we do the stuff with ECN Nonce checking.
5113 * We basically check to see if the nonce sum flag was incorrect
5114 * or if resynchronization needs to be done. Also if we catch a
5115 * misbehaving receiver we give him the kick.
5116 ******************************************************************/
5118 if (asoc->ecn_nonce_allowed) {
5119 if (asoc->nonce_sum_check) {
5120 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5121 if (asoc->nonce_wait_for_ecne == 0) {
5122 struct sctp_tmit_chunk *lchk;
5124 lchk = TAILQ_FIRST(&asoc->send_queue);
5125 asoc->nonce_wait_for_ecne = 1;
5127 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5129 asoc->nonce_wait_tsn = asoc->sending_seq;
5132 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5133 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5135 * Misbehaving peer. We need
5136 * to react to this guy
5138 asoc->ecn_allowed = 0;
5139 asoc->ecn_nonce_allowed = 0;
5144 /* See if Resynchronization Possible */
5145 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5146 asoc->nonce_sum_check = 1;
5148 * now we must calculate what the base is.
5149 * We do this based on two things, we know
5150 * the total's for all the segments
5151 * gap-acked in the SACK, its stored in
5152 * ecn_seg_sums. We also know the SACK's
5153 * nonce sum, its in nonce_sum_flag. So we
5154 * can build a truth table to back-calculate
5156 * asoc->nonce_sum_expect_base:
5158 * SACK-flag-Value Seg-Sums Base 0 0 0
5162 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5166 /* Now are we exiting loss recovery ? */
5167 if (will_exit_fast_recovery) {
5168 /* Ok, we must exit fast recovery */
5169 asoc->fast_retran_loss_recovery = 0;
5171 if ((asoc->sat_t3_loss_recovery) &&
5172 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5174 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5175 /* end satellite t3 loss recovery */
5176 asoc->sat_t3_loss_recovery = 0;
5181 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5182 if (net->will_exit_fast_recovery) {
5183 /* Ok, we must exit fast recovery */
5184 net->fast_retran_loss_recovery = 0;
5188 /* Adjust and set the new rwnd value */
5189 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5190 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5191 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5193 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5194 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5195 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5196 /* SWS sender side engages */
5197 asoc->peers_rwnd = 0;
5199 if (asoc->peers_rwnd > old_rwnd) {
5200 win_probe_recovery = 1;
5203 * Now we must setup so we have a timer up for anyone with
5209 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5210 if (win_probe_recovery && (net->window_probe)) {
5211 win_probe_recovered = 1;
5213 * Find first chunk that was used with
5214 * window probe and clear the event. Put
5215 * it back into the send queue as if has
5218 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5219 if (tp1->window_probe) {
5220 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5225 if (net->flight_size) {
5227 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5228 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5229 stcb->sctp_ep, stcb, net);
5231 if (net->window_probe) {
5232 net->window_probe = 0;
5235 if (net->window_probe) {
5237 * In window probes we must assure a timer
5238 * is still running there
5240 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5241 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5242 stcb->sctp_ep, stcb, net);
5245 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5246 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5248 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5250 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5251 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5252 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5253 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5254 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5260 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5261 (asoc->sent_queue_retran_cnt == 0) &&
5262 (win_probe_recovered == 0) &&
5265 * huh, this should not happen unless all packets are
5266 * PR-SCTP and marked to skip of course.
5268 if (sctp_fs_audit(asoc)) {
5269 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5270 net->flight_size = 0;
5272 asoc->total_flight = 0;
5273 asoc->total_flight_count = 0;
5274 asoc->sent_queue_retran_cnt = 0;
5275 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5276 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5277 sctp_flight_size_increase(tp1);
5278 sctp_total_flight_increase(stcb, tp1);
5279 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5280 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5287 /*********************************************/
5288 /* Here we perform PR-SCTP procedures */
5290 /*********************************************/
5291 /* C1. update advancedPeerAckPoint */
5292 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5293 asoc->advanced_peer_ack_point = cum_ack;
5295 /* C2. try to further move advancedPeerAckPoint ahead */
5296 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5297 struct sctp_tmit_chunk *lchk;
5298 uint32_t old_adv_peer_ack_point;
5300 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5301 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5302 /* C3. See if we need to send a Fwd-TSN */
5303 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5306 * ISSUE with ECN, see FWD-TSN processing for notes
5307 * on issues that will occur when the ECN NONCE
5308 * stuff is put into SCTP for cross checking.
5310 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5311 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5312 0xee, cum_ack, asoc->advanced_peer_ack_point,
5313 old_adv_peer_ack_point);
5315 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5317 send_forward_tsn(stcb, asoc);
5319 * ECN Nonce: Disable Nonce Sum check when
5320 * FWD TSN is sent and store resync tsn
5322 asoc->nonce_sum_check = 0;
5323 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5325 /* try to FR fwd-tsn's that get lost too */
5326 lchk->rec.data.fwd_tsn_cnt++;
5327 if (lchk->rec.data.fwd_tsn_cnt > 3) {
5328 send_forward_tsn(stcb, asoc);
5329 lchk->rec.data.fwd_tsn_cnt = 0;
5334 /* Assure a timer is up */
5335 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5336 stcb->sctp_ep, stcb, lchk->whoTo);
5339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5340 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5342 stcb->asoc.peers_rwnd,
5343 stcb->asoc.total_flight,
5344 stcb->asoc.total_output_queue_size);
/*
 * Handle the cumulative TSN ack carried in a received SHUTDOWN chunk.
 * A SHUTDOWN chunk carries no gap-ack blocks, so we synthesize an
 * "express" SACK from the chunk's cum-ack, with a_rwnd computed so the
 * peer's advertised receive window appears unchanged.
 * NOTE(review): netp is not referenced in the visible body — presumably
 * kept for signature symmetry with the other chunk handlers; confirm.
 */
5349 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5350 struct sctp_nets *netp, int *abort_flag)
5353 uint32_t cum_ack, a_rwnd;
5355 cum_ack = ntohl(cp->cumulative_tsn_ack);
5356 /* Arrange so a_rwnd does NOT change: current peer rwnd plus in-flight bytes. */
5357 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5359 /* Now call the express (no gap-report) sack handling */
5360 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
/*
 * After a FORWARD-TSN has advanced a stream's delivery point, flush the
 * stream's in-order reassembly queue: first hand up everything at or
 * before the new last_sequence_delivered, then resume normal in-order
 * delivery for any messages that became contiguous behind it.
 * Caller holds the TCB lock and the read-queue lock
 * (SCTP_READ_LOCK_HELD is passed to sctp_add_to_readq below).
 */
5364 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5365 struct sctp_stream_in *strmin)
5367 struct sctp_queued_to_read *ctl, *nctl;
5368 struct sctp_association *asoc;
5372 tt = strmin->last_sequence_delivered;
5374 /*
 * Pass 1: deliver anything with an SSN prior to (or equal to) the
 * stream sequence number the FORWARD-TSN moved us up to.
 */
5377 ctl = TAILQ_FIRST(&strmin->inqueue);
5379 nctl = TAILQ_NEXT(ctl, next);
5380 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5381 (tt == ctl->sinfo_ssn)) {
5382 /* this is deliverable now */
5383 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5384 /* subtract pending on streams */
5385 asoc->size_on_all_streams -= ctl->length;
5386 sctp_ucount_decr(asoc->cnt_on_all_streams);
5387 /* deliver it to at least the delivery-q */
5388 if (stcb->sctp_socket) {
5389 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5390 sctp_add_to_readq(stcb->sctp_ep, stcb,
5392 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5395 /* no more delivery now. */
5401 /*
 * Pass 2: deliver, the normal in-order way, any queued messages that
 * are now exactly next in sequence behind the advanced point.
 */
5404 tt = strmin->last_sequence_delivered + 1;
5405 ctl = TAILQ_FIRST(&strmin->inqueue);
5407 nctl = TAILQ_NEXT(ctl, next);
5408 if (tt == ctl->sinfo_ssn) {
5409 /* this is deliverable now */
5410 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5411 /* subtract pending on streams */
5412 asoc->size_on_all_streams -= ctl->length;
5413 sctp_ucount_decr(asoc->cnt_on_all_streams);
5414 /* deliver it to at least the delivery-q */
5415 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5416 if (stcb->sctp_socket) {
5417 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5418 sctp_add_to_readq(stcb->sctp_ep, stcb,
5420 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5423 /* advance to the next expected SSN and keep scanning */
5423 tt = strmin->last_sequence_delivered + 1;
/*
 * Purge from the reassembly queue every fragment belonging to the given
 * (stream, seq) ordered message that a FORWARD-TSN told us to skip.
 * Unordered chunks and chunks on other streams are left alone.  While
 * tossing, keep the association's partial-delivery bookkeeping
 * (tsn_last_delivered / str_of_pdapi / ssn_of_pdapi / fragment_flags)
 * and the per-stream last_sequence_delivered consistent.
 */
5432 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5433 struct sctp_association *asoc,
5434 uint16_t stream, uint16_t seq)
5436 struct sctp_tmit_chunk *chk, *at;
5438 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5439 /* For each one on here see if we need to toss it */
5441 /*
 * For now large messages held on the reasmqueue that are
 * complete will be tossed too. We could in theory do more
 * work to spin through and stop after dumping one msg aka
 * seeing the start of a new msg at the head, and call the
 * delivery function... to see if it can be delivered... But
 * for now we just dump everything on the queue.
 */
5448 chk = TAILQ_FIRST(&asoc->reasmqueue);
5450 at = TAILQ_NEXT(chk, sctp_next);
5452 /*
 * Do not toss it if on a different stream or marked
 * for unordered delivery in which case the stream
 * sequence number has no meaning.
 */
5456 if ((chk->rec.data.stream_number != stream) ||
5457 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5461 if (chk->rec.data.stream_seq == seq) {
5462 /* It needs to be tossed */
5463 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5464 if (compare_with_wrap(chk->rec.data.TSN_seq,
5465 asoc->tsn_last_delivered, MAX_TSN)) {
5466 /* record the newest skipped TSN and its PD-API identity */
5466 asoc->tsn_last_delivered =
5467 chk->rec.data.TSN_seq;
5468 asoc->str_of_pdapi =
5469 chk->rec.data.stream_number;
5470 asoc->ssn_of_pdapi =
5471 chk->rec.data.stream_seq;
5472 asoc->fragment_flags =
5473 chk->rec.data.rcv_flags;
5475 asoc->size_on_reasm_queue -= chk->send_size;
5476 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5478 /* Clear up any stream problem */
5479 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5480 SCTP_DATA_UNORDERED &&
5481 (compare_with_wrap(chk->rec.data.stream_seq,
5482 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5485 /*
 * We must dump forward this streams
 * sequence number if the chunk is
 * not unordered that is being
 * skipped. There is a chance that
 * if the peer does not include the
 * last fragment in its FWD-TSN we
 * WILL have a problem here since
 * you would have a partial chunk in
 * queue that may not be
 * deliverable. Also if a Partial
 * delivery API as started the user
 * may get a partial chunk. The next
 * read returning a new chunk...
 * really ugly but I see no way
 * around it! Maybe a notify??
 */
5501 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5502 chk->rec.data.stream_seq;
5505 sctp_m_freem(chk->data);
5508 sctp_free_a_chunk(stcb, chk);
5509 } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5511 /*
 * If the stream_seq is > than the purging
 * target we are past the span being flushed
 * (queue is SSN-ordered), so scanning can stop.
 */
5523 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5524 struct sctp_forward_tsn_chunk *fwd,
5525 int *abort_flag, struct mbuf *m, int offset)
5528 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5529 * forward TSN, when the SACK comes back that acknowledges the
5530 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5531 * get quite tricky since we may have sent more data interveneing
5532 * and must carefully account for what the SACK says on the nonce
5533 * and any gaps that are reported. This work will NOT be done here,
5534 * but I note it here since it is really related to PR-SCTP and
5538 /* The pr-sctp fwd tsn */
5540 * here we will perform all the data receiver side steps for
5541 * processing FwdTSN, as required in by pr-sctp draft:
5543 * Assume we get FwdTSN(x):
5545 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5546 * others we have 3) examine and update re-ordering queue on
5547 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5548 * report where we are.
5550 struct sctp_association *asoc;
5551 uint32_t new_cum_tsn, gap;
5552 unsigned int i, fwd_sz, cumack_set_flag, m_size;
5554 struct sctp_stream_in *strm;
5555 struct sctp_tmit_chunk *chk, *at;
5556 struct sctp_queued_to_read *ctl, *sv;
5558 cumack_set_flag = 0;
5560 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5561 SCTPDBG(SCTP_DEBUG_INDATA1,
5562 "Bad size too small/big fwd-tsn\n");
5565 m_size = (stcb->asoc.mapping_array_size << 3);
5566 /*************************************************************/
5567 /* 1. Here we update local cumTSN and shift the bitmap array */
5568 /*************************************************************/
5569 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5571 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5572 asoc->cumulative_tsn == new_cum_tsn) {
5573 /* Already got there ... */
5577 * now we know the new TSN is more advanced, let's find the actual
5580 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5581 asoc->cumulative_tsn = new_cum_tsn;
5582 if (gap >= m_size) {
5583 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5587 * out of range (of single byte chunks in the rwnd I
5588 * give out). This must be an attacker.
5591 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5592 0, M_DONTWAIT, 1, MT_DATA);
5594 struct sctp_paramhdr *ph;
5597 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5598 (sizeof(uint32_t) * 3);
5599 ph = mtod(oper, struct sctp_paramhdr *);
5600 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5601 ph->param_length = htons(SCTP_BUF_LEN(oper));
5602 ippp = (uint32_t *) (ph + 1);
5603 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5605 *ippp = asoc->highest_tsn_inside_map;
5607 *ippp = new_cum_tsn;
5609 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5610 sctp_abort_an_association(stcb->sctp_ep, stcb,
5611 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5614 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5616 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5617 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5618 asoc->highest_tsn_inside_map = new_cum_tsn;
5620 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5621 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5623 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5624 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5626 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5628 SCTP_TCB_LOCK_ASSERT(stcb);
5629 for (i = 0; i <= gap; i++) {
5630 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5631 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5632 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5633 if (compare_with_wrap(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5634 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5639 /*************************************************************/
5640 /* 2. Clear up re-assembly queue */
5641 /*************************************************************/
5643 * First service it if pd-api is up, just in case we can progress it
5646 if (asoc->fragmented_delivery_inprogress) {
5647 sctp_service_reassembly(stcb, asoc);
5649 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5650 /* For each one on here see if we need to toss it */
5652 * For now large messages held on the reasmqueue that are
5653 * complete will be tossed too. We could in theory do more
5654 * work to spin through and stop after dumping one msg aka
5655 * seeing the start of a new msg at the head, and call the
5656 * delivery function... to see if it can be delivered... But
5657 * for now we just dump everything on the queue.
5659 chk = TAILQ_FIRST(&asoc->reasmqueue);
5661 at = TAILQ_NEXT(chk, sctp_next);
5662 if ((compare_with_wrap(new_cum_tsn,
5663 chk->rec.data.TSN_seq, MAX_TSN)) ||
5664 (new_cum_tsn == chk->rec.data.TSN_seq)) {
5665 /* It needs to be tossed */
5666 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5667 if (compare_with_wrap(chk->rec.data.TSN_seq,
5668 asoc->tsn_last_delivered, MAX_TSN)) {
5669 asoc->tsn_last_delivered =
5670 chk->rec.data.TSN_seq;
5671 asoc->str_of_pdapi =
5672 chk->rec.data.stream_number;
5673 asoc->ssn_of_pdapi =
5674 chk->rec.data.stream_seq;
5675 asoc->fragment_flags =
5676 chk->rec.data.rcv_flags;
5678 asoc->size_on_reasm_queue -= chk->send_size;
5679 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5681 /* Clear up any stream problem */
5682 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5683 SCTP_DATA_UNORDERED &&
5684 (compare_with_wrap(chk->rec.data.stream_seq,
5685 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5688 * We must dump forward this streams
5689 * sequence number if the chunk is
5690 * not unordered that is being
5691 * skipped. There is a chance that
5692 * if the peer does not include the
5693 * last fragment in its FWD-TSN we
5694 * WILL have a problem here since
5695 * you would have a partial chunk in
5696 * queue that may not be
5697 * deliverable. Also if a Partial
5698 * delivery API as started the user
5699 * may get a partial chunk. The next
5700 * read returning a new chunk...
5701 * really ugly but I see no way
5702 * around it! Maybe a notify??
5704 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5705 chk->rec.data.stream_seq;
5708 sctp_m_freem(chk->data);
5711 sctp_free_a_chunk(stcb, chk);
5714 * Ok we have gone beyond the end of the
5722 /*******************************************************/
5723 /* 3. Update the PR-stream re-ordering queues and fix */
5724 /* delivery issues as needed. */
5725 /*******************************************************/
5726 fwd_sz -= sizeof(*fwd);
5729 unsigned int num_str;
5730 struct sctp_strseq *stseq, strseqbuf;
5732 offset += sizeof(*fwd);
5734 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5735 num_str = fwd_sz / sizeof(struct sctp_strseq);
5736 for (i = 0; i < num_str; i++) {
5739 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5740 sizeof(struct sctp_strseq),
5741 (uint8_t *) & strseqbuf);
5742 offset += sizeof(struct sctp_strseq);
5743 if (stseq == NULL) {
5747 st = ntohs(stseq->stream);
5749 st = ntohs(stseq->sequence);
5750 stseq->sequence = st;
5755 * Ok we now look for the stream/seq on the read
5756 * queue where its not all delivered. If we find it
5757 * we transmute the read entry into a PDI_ABORTED.
5759 if (stseq->stream >= asoc->streamincnt) {
5760 /* screwed up streams, stop! */
5763 if ((asoc->str_of_pdapi == stseq->stream) &&
5764 (asoc->ssn_of_pdapi == stseq->sequence)) {
5766 * If this is the one we were partially
5767 * delivering now then we no longer are.
5768 * Note this will change with the reassembly
5771 asoc->fragmented_delivery_inprogress = 0;
5773 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5774 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5775 if ((ctl->sinfo_stream == stseq->stream) &&
5776 (ctl->sinfo_ssn == stseq->sequence)) {
5777 str_seq = (stseq->stream << 16) | stseq->sequence;
5779 ctl->pdapi_aborted = 1;
5780 sv = stcb->asoc.control_pdapi;
5781 stcb->asoc.control_pdapi = ctl;
5782 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5784 SCTP_PARTIAL_DELIVERY_ABORTED,
5786 SCTP_SO_NOT_LOCKED);
5787 stcb->asoc.control_pdapi = sv;
5789 } else if ((ctl->sinfo_stream == stseq->stream) &&
5790 (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5791 /* We are past our victim SSN */
5795 strm = &asoc->strmin[stseq->stream];
5796 if (compare_with_wrap(stseq->sequence,
5797 strm->last_sequence_delivered, MAX_SEQ)) {
5798 /* Update the sequence number */
5799 strm->last_sequence_delivered =
5802 /* now kick the stream the new way */
5803 /* sa_ignore NO_NULL_CHK */
5804 sctp_kick_prsctp_reorder_queue(stcb, strm);
5806 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5809 * Now slide thing forward.
5811 sctp_slide_mapping_arrays(stcb);
5813 if (TAILQ_FIRST(&asoc->reasmqueue)) {
5814 /* now lets kick out and check for more fragmented delivery */
5815 /* sa_ignore NO_NULL_CHK */
5816 sctp_deliver_reasm_check(stcb, &stcb->asoc);