2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
94 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
95 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
101 /* what is the overhead of all these rwnd's */
102 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
104 * If the window gets too small due to ctrl-stuff, reduce it to 1,
105 * even it is 0. SWS engaged
107 if (calc < stcb->asoc.my_rwnd_control_len) {
116 * Build out our readq entry based on the incoming packet.
118 struct sctp_queued_to_read *
119 sctp_build_readq_entry(struct sctp_tcb *stcb,
120 struct sctp_nets *net,
121 uint32_t tsn, uint32_t ppid,
122 uint32_t context, uint16_t stream_no,
123 uint16_t stream_seq, uint8_t flags,
126 struct sctp_queued_to_read *read_queue_e = NULL;
128 sctp_alloc_a_readq(stcb, read_queue_e);
129 if (read_queue_e == NULL) {
132 read_queue_e->sinfo_stream = stream_no;
133 read_queue_e->sinfo_ssn = stream_seq;
134 read_queue_e->sinfo_flags = (flags << 8);
135 read_queue_e->sinfo_ppid = ppid;
136 read_queue_e->sinfo_context = stcb->asoc.context;
137 read_queue_e->sinfo_timetolive = 0;
138 read_queue_e->sinfo_tsn = tsn;
139 read_queue_e->sinfo_cumtsn = tsn;
140 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
141 read_queue_e->whoFrom = net;
142 read_queue_e->length = 0;
143 atomic_add_int(&net->ref_count, 1);
144 read_queue_e->data = dm;
145 read_queue_e->spec_flags = 0;
146 read_queue_e->tail_mbuf = NULL;
147 read_queue_e->aux_data = NULL;
148 read_queue_e->stcb = stcb;
149 read_queue_e->port_from = stcb->rport;
150 read_queue_e->do_not_ref_stcb = 0;
151 read_queue_e->end_added = 0;
152 read_queue_e->some_taken = 0;
153 read_queue_e->pdapi_aborted = 0;
155 return (read_queue_e);
160 * Build out our readq entry based on the incoming packet.
162 static struct sctp_queued_to_read *
163 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
164 struct sctp_tmit_chunk *chk)
166 struct sctp_queued_to_read *read_queue_e = NULL;
168 sctp_alloc_a_readq(stcb, read_queue_e);
169 if (read_queue_e == NULL) {
172 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
173 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
174 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
175 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
176 read_queue_e->sinfo_context = stcb->asoc.context;
177 read_queue_e->sinfo_timetolive = 0;
178 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
179 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
180 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
181 read_queue_e->whoFrom = chk->whoTo;
182 read_queue_e->aux_data = NULL;
183 read_queue_e->length = 0;
184 atomic_add_int(&chk->whoTo->ref_count, 1);
185 read_queue_e->data = chk->data;
186 read_queue_e->tail_mbuf = NULL;
187 read_queue_e->stcb = stcb;
188 read_queue_e->port_from = stcb->rport;
189 read_queue_e->spec_flags = 0;
190 read_queue_e->do_not_ref_stcb = 0;
191 read_queue_e->end_added = 0;
192 read_queue_e->some_taken = 0;
193 read_queue_e->pdapi_aborted = 0;
195 return (read_queue_e);
200 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
201 struct sctp_sndrcvinfo *sinfo)
203 struct sctp_sndrcvinfo *outinfo;
207 int use_extended = 0;
209 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
210 /* user does not want the sndrcv ctl */
213 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
215 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
217 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
221 ret = sctp_get_mbuf_for_msg(len,
222 0, M_DONTWAIT, 1, MT_DATA);
228 /* We need a CMSG header followed by the struct */
229 cmh = mtod(ret, struct cmsghdr *);
230 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
231 cmh->cmsg_level = IPPROTO_SCTP;
233 cmh->cmsg_type = SCTP_EXTRCV;
235 memcpy(outinfo, sinfo, len);
237 cmh->cmsg_type = SCTP_SNDRCV;
241 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
247 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
249 struct sctp_sndrcvinfo *sinfo)
251 struct sctp_sndrcvinfo *outinfo;
255 int use_extended = 0;
257 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
258 /* user does not want the sndrcv ctl */
261 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
263 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
265 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
267 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
272 /* We need a CMSG header followed by the struct */
273 cmh = (struct cmsghdr *)buf;
274 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
275 cmh->cmsg_level = IPPROTO_SCTP;
277 cmh->cmsg_type = SCTP_EXTRCV;
279 memcpy(outinfo, sinfo, len);
281 cmh->cmsg_type = SCTP_SNDRCV;
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
295 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
298 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
299 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
300 printf("gap:%x tsn:%x\n", gap, tsn);
301 sctp_print_mapping_array(asoc);
303 panic("Things are really messed up now!!");
306 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
308 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
309 asoc->highest_tsn_inside_nr_map = tsn;
311 if (tsn == asoc->highest_tsn_inside_map) {
312 /* We must back down to see what the new highest is */
313 for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
314 (i == asoc->mapping_array_base_tsn)); i--) {
315 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
316 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
317 asoc->highest_tsn_inside_map = i;
323 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * NOTE(review): this listing appears to have dropped many short source
 * lines (braces, returns, "} else {", loop heads), so the statements below
 * are kept verbatim rather than restructured.  Per its own header comment,
 * this routine drains the reassembly queue into the socket read queue until
 * space runs out, the TSNs stop being sequential, or a LAST_FRAG is hit.
 */
330 * We are delivering currently from the reassembly queue. We must continue to
331 * deliver until we either: 1) run out of space. 2) run out of sequential
332 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
335 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
337 struct sctp_tmit_chunk *chk;
343 struct sctp_queued_to_read *control, *ctl, *ctlat;
348 cntDel = stream_no = 0;
349 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
350 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
351 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
352 /* socket above is long gone or going.. */
/* NOTE(review): this branch frees every queued reassembly chunk since
 * there is no socket left to deliver to. */
354 asoc->fragmented_delivery_inprogress = 0;
355 chk = TAILQ_FIRST(&asoc->reasmqueue);
357 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
358 asoc->size_on_reasm_queue -= chk->send_size;
359 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
361 * Lose the data pointer, since its in the socket
365 sctp_m_freem(chk->data);
368 /* Now free the address and data */
369 sctp_free_a_chunk(stcb, chk);
370 /* sa_ignore FREED_MEMORY */
371 chk = TAILQ_FIRST(&asoc->reasmqueue);
375 SCTP_TCB_LOCK_ASSERT(stcb);
377 chk = TAILQ_FIRST(&asoc->reasmqueue);
381 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
382 /* Can't deliver more :< */
385 stream_no = chk->rec.data.stream_number;
386 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
387 if (nxt_todel != chk->rec.data.stream_seq &&
388 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
390 * Not the next sequence to deliver in its stream OR
395 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* NOTE(review): a FIRST fragment starts a fresh readq entry which is
 * saved in control_pdapi for subsequent appends. */
397 control = sctp_build_readq_entry_chk(stcb, chk);
398 if (control == NULL) {
402 /* save it off for our future deliveries */
403 stcb->asoc.control_pdapi = control;
404 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
408 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
409 sctp_add_to_readq(stcb->sctp_ep,
410 stcb, control, &stcb->sctp_socket->so_rcv, end,
411 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
414 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
418 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
419 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
420 stcb->asoc.control_pdapi,
421 chk->data, end, chk->rec.data.TSN_seq,
422 &stcb->sctp_socket->so_rcv)) {
424 * something is very wrong, either
425 * control_pdapi is NULL, or the tail_mbuf
426 * is corrupt, or there is a EOM already on
429 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
433 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
434 panic("This should not happen control_pdapi NULL?");
436 /* if we did not panic, it was a EOM */
437 panic("Bad chunking ??");
439 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
440 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
442 SCTP_PRINTF("Bad chunking ??\n");
443 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
451 /* pull it we did it */
452 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
453 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
454 asoc->fragmented_delivery_inprogress = 0;
455 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
456 asoc->strmin[stream_no].last_sequence_delivered++;
458 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
459 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
461 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
463 * turn the flag back on since we just delivered
466 asoc->fragmented_delivery_inprogress = 1;
/* pdapi bookkeeping: remember what was last handed to the app */
468 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
469 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
470 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
471 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
473 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
474 asoc->size_on_reasm_queue -= chk->send_size;
475 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
476 /* free up the chk */
478 sctp_free_a_chunk(stcb, chk);
480 if (asoc->fragmented_delivery_inprogress == 0) {
482 * Now lets see if we can deliver the next one on
485 struct sctp_stream_in *strm;
487 strm = &asoc->strmin[stream_no];
488 nxt_todel = strm->last_sequence_delivered + 1;
489 ctl = TAILQ_FIRST(&strm->inqueue);
490 if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
491 while (ctl != NULL) {
492 /* Deliver more if we can. */
493 if (nxt_todel == ctl->sinfo_ssn) {
494 ctlat = TAILQ_NEXT(ctl, next);
495 TAILQ_REMOVE(&strm->inqueue, ctl, next);
496 asoc->size_on_all_streams -= ctl->length;
497 sctp_ucount_decr(asoc->cnt_on_all_streams);
498 strm->last_sequence_delivered++;
499 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
500 sctp_add_to_readq(stcb->sctp_ep, stcb,
502 &stcb->sctp_socket->so_rcv, 1,
503 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
508 nxt_todel = strm->last_sequence_delivered + 1;
513 /* sa_ignore FREED_MEMORY */
514 chk = TAILQ_FIRST(&asoc->reasmqueue);
/*
 * NOTE(review): this listing appears to have dropped many short source
 * lines (braces, "} else {", abort_flag assignments), so statements are
 * kept verbatim.  Per its own header comment, this routine either delivers
 * the chunk straight to the socket read queue (when it is the next SSN to
 * go) or inserts it SSN-ordered into the stream's in-queue; a duplicate or
 * backwards SSN aborts the association with a protocol-violation cause.
 */
519 * Queue the chunk either right into the socket buffer if it is the next one
520 * to go OR put it in the correct place in the delivery queue. If we do
521 * append to the so_buf, keep doing so until we are out of order. One big
522 * question still remains, what to do when the socket buffer is FULL??
525 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
526 struct sctp_queued_to_read *control, int *abort_flag)
529 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
530 * all the data in one stream this could happen quite rapidly. One
531 * could use the TSN to keep track of things, but this scheme breaks
532 * down in the other type of stream useage that could occur. Send a
533 * single msg to stream 0, send 4Billion messages to stream 1, now
534 * send a message to stream 0. You have a situation where the TSN
535 * has wrapped but not in the stream. Is this worth worrying about
536 * or should we just change our queue sort at the bottom to be by
539 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
540 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
541 * assignment this could happen... and I don't see how this would be
542 * a violation. So for now I am undecided an will leave the sort by
543 * SSN alone. Maybe a hybred approach is the answer
546 struct sctp_stream_in *strm;
547 struct sctp_queued_to_read *at;
553 asoc->size_on_all_streams += control->length;
554 sctp_ucount_incr(asoc->cnt_on_all_streams);
555 strm = &asoc->strmin[control->sinfo_stream];
556 nxt_todel = strm->last_sequence_delivered + 1;
557 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
558 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
560 SCTPDBG(SCTP_DEBUG_INDATA1,
561 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
562 (uint32_t) control->sinfo_stream,
563 (uint32_t) strm->last_sequence_delivered,
564 (uint32_t) nxt_todel);
565 if (compare_with_wrap(strm->last_sequence_delivered,
566 control->sinfo_ssn, MAX_SEQ) ||
567 (strm->last_sequence_delivered == control->sinfo_ssn)) {
568 /* The incoming sseq is behind where we last delivered? */
569 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
570 control->sinfo_ssn, strm->last_sequence_delivered);
573 * throw it in the stream so it gets cleaned up in
574 * association destruction
576 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Build a PROTOCOL_VIOLATION operational-error cause (paramhdr plus
 * three uint32 fields: location, tsn, stream<<16|ssn) and abort. */
577 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
578 0, M_DONTWAIT, 1, MT_DATA);
580 struct sctp_paramhdr *ph;
583 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
584 (sizeof(uint32_t) * 3);
585 ph = mtod(oper, struct sctp_paramhdr *);
586 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
587 ph->param_length = htons(SCTP_BUF_LEN(oper));
588 ippp = (uint32_t *) (ph + 1);
589 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
591 *ippp = control->sinfo_tsn;
593 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
595 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
596 sctp_abort_an_association(stcb->sctp_ep, stcb,
597 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
603 if (nxt_todel == control->sinfo_ssn) {
604 /* can be delivered right away? */
605 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
606 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
608 /* EY it wont be queued if it could be delivered directly */
610 asoc->size_on_all_streams -= control->length;
611 sctp_ucount_decr(asoc->cnt_on_all_streams);
612 strm->last_sequence_delivered++;
614 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
615 sctp_add_to_readq(stcb->sctp_ep, stcb,
617 &stcb->sctp_socket->so_rcv, 1,
618 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* After an immediate delivery, drain any now-in-order entries that
 * were waiting in the stream in-queue. */
619 control = TAILQ_FIRST(&strm->inqueue);
620 while (control != NULL) {
622 nxt_todel = strm->last_sequence_delivered + 1;
623 if (nxt_todel == control->sinfo_ssn) {
624 at = TAILQ_NEXT(control, next);
625 TAILQ_REMOVE(&strm->inqueue, control, next);
626 asoc->size_on_all_streams -= control->length;
627 sctp_ucount_decr(asoc->cnt_on_all_streams);
628 strm->last_sequence_delivered++;
630 * We ignore the return of deliver_data here
631 * since we always can hold the chunk on the
632 * d-queue. And we have a finite number that
633 * can be delivered from the strq.
635 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
636 sctp_log_strm_del(control, NULL,
637 SCTP_STR_LOG_FROM_IMMED_DEL);
639 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
640 sctp_add_to_readq(stcb->sctp_ep, stcb,
642 &stcb->sctp_socket->so_rcv, 1,
643 SCTP_READ_LOCK_NOT_HELD,
653 * Ok, we did not deliver this guy, find the correct place
654 * to put it on the queue.
656 if ((compare_with_wrap(asoc->cumulative_tsn,
657 control->sinfo_tsn, MAX_TSN)) ||
658 (control->sinfo_tsn == asoc->cumulative_tsn)) {
661 if (TAILQ_EMPTY(&strm->inqueue)) {
663 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
664 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
666 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
668 TAILQ_FOREACH(at, &strm->inqueue, next) {
669 if (compare_with_wrap(at->sinfo_ssn,
670 control->sinfo_ssn, MAX_SEQ)) {
672 * one in queue is bigger than the
673 * new one, insert before this one
675 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
676 sctp_log_strm_del(control, at,
677 SCTP_STR_LOG_FROM_INSERT_MD);
679 TAILQ_INSERT_BEFORE(at, control, next);
681 } else if (at->sinfo_ssn == control->sinfo_ssn) {
683 * Gak, He sent me a duplicate str
687 * foo bar, I guess I will just free
688 * this new guy, should we abort
689 * too? FIX ME MAYBE? Or it COULD be
690 * that the SSN's have wrapped.
691 * Maybe I should compare to TSN
692 * somehow... sigh for now just blow
697 sctp_m_freem(control->data);
698 control->data = NULL;
699 asoc->size_on_all_streams -= control->length;
700 sctp_ucount_decr(asoc->cnt_on_all_streams);
701 if (control->whoFrom)
702 sctp_free_remote_addr(control->whoFrom);
703 control->whoFrom = NULL;
704 sctp_free_a_readq(stcb, control);
707 if (TAILQ_NEXT(at, next) == NULL) {
709 * We are at the end, insert
712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
713 sctp_log_strm_del(control, at,
714 SCTP_STR_LOG_FROM_INSERT_TL);
716 TAILQ_INSERT_AFTER(&strm->inqueue,
727 * Returns two things: You get the total size of the deliverable parts of the
728 * first fragmented message on the reassembly queue. And you get a 1 back if
729 * all of the message is ready or a 0 back if the message is still incomplete
732 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
734 struct sctp_tmit_chunk *chk;
738 chk = TAILQ_FIRST(&asoc->reasmqueue);
740 /* nothing on the queue */
743 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
744 /* Not a first on the queue */
747 tsn = chk->rec.data.TSN_seq;
749 if (tsn != chk->rec.data.TSN_seq) {
752 *t_size += chk->send_size;
753 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
757 chk = TAILQ_NEXT(chk, sctp_next);
763 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
765 struct sctp_tmit_chunk *chk;
767 uint32_t tsize, pd_point;
770 chk = TAILQ_FIRST(&asoc->reasmqueue);
773 asoc->size_on_reasm_queue = 0;
774 asoc->cnt_on_reasm_queue = 0;
777 if (asoc->fragmented_delivery_inprogress == 0) {
779 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
780 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
781 (nxt_todel == chk->rec.data.stream_seq ||
782 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
784 * Yep the first one is here and its ok to deliver
787 if (stcb->sctp_socket) {
788 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
789 stcb->sctp_ep->partial_delivery_point);
791 pd_point = stcb->sctp_ep->partial_delivery_point;
793 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
796 * Yes, we setup to start reception, by
797 * backing down the TSN just in case we
798 * can't deliver. If we
800 asoc->fragmented_delivery_inprogress = 1;
801 asoc->tsn_last_delivered =
802 chk->rec.data.TSN_seq - 1;
804 chk->rec.data.stream_number;
805 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
806 asoc->pdapi_ppid = chk->rec.data.payloadtype;
807 asoc->fragment_flags = chk->rec.data.rcv_flags;
808 sctp_service_reassembly(stcb, asoc);
813 * Service re-assembly will deliver stream data queued at
814 * the end of fragmented delivery.. but it wont know to go
815 * back and call itself again... we do that here with the
818 sctp_service_reassembly(stcb, asoc);
819 if (asoc->fragmented_delivery_inprogress == 0) {
821 * finished our Fragmented delivery, could be more
830 * Dump onto the re-assembly queue, in its proper place. After dumping on the
831 * queue, see if anthing can be delivered. If so pull it off (or as much as
832 * we can. If we run out of space then we must dump what we can and set the
833 * appropriate flag to say we queued what we could.
836 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
837 struct sctp_tmit_chunk *chk, int *abort_flag)
840 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
842 struct sctp_tmit_chunk *at, *prev, *next;
845 cum_ackp1 = asoc->tsn_last_delivered + 1;
846 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
847 /* This is the first one on the queue */
848 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
850 * we do not check for delivery of anything when only one
853 asoc->size_on_reasm_queue = chk->send_size;
854 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
855 if (chk->rec.data.TSN_seq == cum_ackp1) {
856 if (asoc->fragmented_delivery_inprogress == 0 &&
857 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
858 SCTP_DATA_FIRST_FRAG) {
860 * An empty queue, no delivery inprogress,
861 * we hit the next one and it does NOT have
862 * a FIRST fragment mark.
864 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
865 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
866 0, M_DONTWAIT, 1, MT_DATA);
869 struct sctp_paramhdr *ph;
873 sizeof(struct sctp_paramhdr) +
874 (sizeof(uint32_t) * 3);
875 ph = mtod(oper, struct sctp_paramhdr *);
877 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
878 ph->param_length = htons(SCTP_BUF_LEN(oper));
879 ippp = (uint32_t *) (ph + 1);
880 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
882 *ippp = chk->rec.data.TSN_seq;
884 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
887 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
888 sctp_abort_an_association(stcb->sctp_ep, stcb,
889 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
891 } else if (asoc->fragmented_delivery_inprogress &&
892 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
894 * We are doing a partial delivery and the
895 * NEXT chunk MUST be either the LAST or
896 * MIDDLE fragment NOT a FIRST
898 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
899 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
900 0, M_DONTWAIT, 1, MT_DATA);
902 struct sctp_paramhdr *ph;
906 sizeof(struct sctp_paramhdr) +
907 (3 * sizeof(uint32_t));
908 ph = mtod(oper, struct sctp_paramhdr *);
910 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
911 ph->param_length = htons(SCTP_BUF_LEN(oper));
912 ippp = (uint32_t *) (ph + 1);
913 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
915 *ippp = chk->rec.data.TSN_seq;
917 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
919 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
920 sctp_abort_an_association(stcb->sctp_ep, stcb,
921 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
923 } else if (asoc->fragmented_delivery_inprogress) {
925 * Here we are ok with a MIDDLE or LAST
928 if (chk->rec.data.stream_number !=
929 asoc->str_of_pdapi) {
930 /* Got to be the right STR No */
931 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
932 chk->rec.data.stream_number,
934 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
935 0, M_DONTWAIT, 1, MT_DATA);
937 struct sctp_paramhdr *ph;
941 sizeof(struct sctp_paramhdr) +
942 (sizeof(uint32_t) * 3);
944 struct sctp_paramhdr *);
946 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
948 htons(SCTP_BUF_LEN(oper));
949 ippp = (uint32_t *) (ph + 1);
950 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
952 *ippp = chk->rec.data.TSN_seq;
954 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
956 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
957 sctp_abort_an_association(stcb->sctp_ep,
958 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
960 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
961 SCTP_DATA_UNORDERED &&
962 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
963 /* Got to be the right STR Seq */
964 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
965 chk->rec.data.stream_seq,
967 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
968 0, M_DONTWAIT, 1, MT_DATA);
970 struct sctp_paramhdr *ph;
974 sizeof(struct sctp_paramhdr) +
975 (3 * sizeof(uint32_t));
977 struct sctp_paramhdr *);
979 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
981 htons(SCTP_BUF_LEN(oper));
982 ippp = (uint32_t *) (ph + 1);
983 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
985 *ippp = chk->rec.data.TSN_seq;
987 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
990 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
991 sctp_abort_an_association(stcb->sctp_ep,
992 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1000 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1001 if (compare_with_wrap(at->rec.data.TSN_seq,
1002 chk->rec.data.TSN_seq, MAX_TSN)) {
1004 * one in queue is bigger than the new one, insert
1008 asoc->size_on_reasm_queue += chk->send_size;
1009 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1011 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1013 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1014 /* Gak, He sent me a duplicate str seq number */
1016 * foo bar, I guess I will just free this new guy,
1017 * should we abort too? FIX ME MAYBE? Or it COULD be
1018 * that the SSN's have wrapped. Maybe I should
1019 * compare to TSN somehow... sigh for now just blow
1023 sctp_m_freem(chk->data);
1026 sctp_free_a_chunk(stcb, chk);
1029 last_flags = at->rec.data.rcv_flags;
1030 last_tsn = at->rec.data.TSN_seq;
1032 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1034 * We are at the end, insert it after this
1037 /* check it first */
1038 asoc->size_on_reasm_queue += chk->send_size;
1039 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1040 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1045 /* Now the audits */
1047 prev_tsn = chk->rec.data.TSN_seq - 1;
1048 if (prev_tsn == prev->rec.data.TSN_seq) {
1050 * Ok the one I am dropping onto the end is the
1051 * NEXT. A bit of valdiation here.
1053 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1054 SCTP_DATA_FIRST_FRAG ||
1055 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1056 SCTP_DATA_MIDDLE_FRAG) {
1058 * Insert chk MUST be a MIDDLE or LAST
1061 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1062 SCTP_DATA_FIRST_FRAG) {
1063 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1064 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1065 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1066 0, M_DONTWAIT, 1, MT_DATA);
1068 struct sctp_paramhdr *ph;
1071 SCTP_BUF_LEN(oper) =
1072 sizeof(struct sctp_paramhdr) +
1073 (3 * sizeof(uint32_t));
1075 struct sctp_paramhdr *);
1077 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1079 htons(SCTP_BUF_LEN(oper));
1080 ippp = (uint32_t *) (ph + 1);
1081 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1083 *ippp = chk->rec.data.TSN_seq;
1085 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1088 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1089 sctp_abort_an_association(stcb->sctp_ep,
1090 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1094 if (chk->rec.data.stream_number !=
1095 prev->rec.data.stream_number) {
1097 * Huh, need the correct STR here,
1098 * they must be the same.
1100 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1101 chk->rec.data.stream_number,
1102 prev->rec.data.stream_number);
1103 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1104 0, M_DONTWAIT, 1, MT_DATA);
1106 struct sctp_paramhdr *ph;
1109 SCTP_BUF_LEN(oper) =
1110 sizeof(struct sctp_paramhdr) +
1111 (3 * sizeof(uint32_t));
1113 struct sctp_paramhdr *);
1115 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1117 htons(SCTP_BUF_LEN(oper));
1118 ippp = (uint32_t *) (ph + 1);
1119 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1121 *ippp = chk->rec.data.TSN_seq;
1123 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1125 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1126 sctp_abort_an_association(stcb->sctp_ep,
1127 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1132 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1133 chk->rec.data.stream_seq !=
1134 prev->rec.data.stream_seq) {
1136 * Huh, need the correct STR here,
1137 * they must be the same.
1139 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1140 chk->rec.data.stream_seq,
1141 prev->rec.data.stream_seq);
1142 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1143 0, M_DONTWAIT, 1, MT_DATA);
1145 struct sctp_paramhdr *ph;
1148 SCTP_BUF_LEN(oper) =
1149 sizeof(struct sctp_paramhdr) +
1150 (3 * sizeof(uint32_t));
1152 struct sctp_paramhdr *);
1154 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1156 htons(SCTP_BUF_LEN(oper));
1157 ippp = (uint32_t *) (ph + 1);
1158 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1160 *ippp = chk->rec.data.TSN_seq;
1162 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1164 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1165 sctp_abort_an_association(stcb->sctp_ep,
1166 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1171 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1172 SCTP_DATA_LAST_FRAG) {
1173 /* Insert chk MUST be a FIRST */
1174 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1175 SCTP_DATA_FIRST_FRAG) {
1176 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1177 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1178 0, M_DONTWAIT, 1, MT_DATA);
1180 struct sctp_paramhdr *ph;
1183 SCTP_BUF_LEN(oper) =
1184 sizeof(struct sctp_paramhdr) +
1185 (3 * sizeof(uint32_t));
1187 struct sctp_paramhdr *);
1189 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1191 htons(SCTP_BUF_LEN(oper));
1192 ippp = (uint32_t *) (ph + 1);
1193 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1195 *ippp = chk->rec.data.TSN_seq;
1197 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1200 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1201 sctp_abort_an_association(stcb->sctp_ep,
1202 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1211 post_tsn = chk->rec.data.TSN_seq + 1;
1212 if (post_tsn == next->rec.data.TSN_seq) {
1214 * Ok the one I am inserting ahead of is my NEXT
1215 * one. A bit of valdiation here.
1217 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1218 /* Insert chk MUST be a last fragment */
1219 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1220 != SCTP_DATA_LAST_FRAG) {
1221 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1222 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1223 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1224 0, M_DONTWAIT, 1, MT_DATA);
1226 struct sctp_paramhdr *ph;
1229 SCTP_BUF_LEN(oper) =
1230 sizeof(struct sctp_paramhdr) +
1231 (3 * sizeof(uint32_t));
1233 struct sctp_paramhdr *);
1235 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1237 htons(SCTP_BUF_LEN(oper));
1238 ippp = (uint32_t *) (ph + 1);
1239 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1241 *ippp = chk->rec.data.TSN_seq;
1243 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1245 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1246 sctp_abort_an_association(stcb->sctp_ep,
1247 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1252 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1253 SCTP_DATA_MIDDLE_FRAG ||
1254 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1255 SCTP_DATA_LAST_FRAG) {
1257 * Insert chk CAN be MIDDLE or FIRST NOT
1260 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1261 SCTP_DATA_LAST_FRAG) {
1262 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1263 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1264 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1265 0, M_DONTWAIT, 1, MT_DATA);
1267 struct sctp_paramhdr *ph;
1270 SCTP_BUF_LEN(oper) =
1271 sizeof(struct sctp_paramhdr) +
1272 (3 * sizeof(uint32_t));
1274 struct sctp_paramhdr *);
1276 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1278 htons(SCTP_BUF_LEN(oper));
1279 ippp = (uint32_t *) (ph + 1);
1280 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1282 *ippp = chk->rec.data.TSN_seq;
1284 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1287 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1288 sctp_abort_an_association(stcb->sctp_ep,
1289 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1294 if (chk->rec.data.stream_number !=
1295 next->rec.data.stream_number) {
1297 * Huh, need the correct STR here,
1298 * they must be the same.
1300 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1301 chk->rec.data.stream_number,
1302 next->rec.data.stream_number);
1303 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1304 0, M_DONTWAIT, 1, MT_DATA);
1306 struct sctp_paramhdr *ph;
1309 SCTP_BUF_LEN(oper) =
1310 sizeof(struct sctp_paramhdr) +
1311 (3 * sizeof(uint32_t));
1313 struct sctp_paramhdr *);
1315 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1317 htons(SCTP_BUF_LEN(oper));
1318 ippp = (uint32_t *) (ph + 1);
1319 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1321 *ippp = chk->rec.data.TSN_seq;
1323 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1326 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1327 sctp_abort_an_association(stcb->sctp_ep,
1328 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1333 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1334 chk->rec.data.stream_seq !=
1335 next->rec.data.stream_seq) {
1337 * Huh, need the correct STR here,
1338 * they must be the same.
1340 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1341 chk->rec.data.stream_seq,
1342 next->rec.data.stream_seq);
1343 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1344 0, M_DONTWAIT, 1, MT_DATA);
1346 struct sctp_paramhdr *ph;
1349 SCTP_BUF_LEN(oper) =
1350 sizeof(struct sctp_paramhdr) +
1351 (3 * sizeof(uint32_t));
1353 struct sctp_paramhdr *);
1355 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1357 htons(SCTP_BUF_LEN(oper));
1358 ippp = (uint32_t *) (ph + 1);
1359 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1361 *ippp = chk->rec.data.TSN_seq;
1363 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1365 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1366 sctp_abort_an_association(stcb->sctp_ep,
1367 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1375 /* Do we need to do some delivery? check */
1376 sctp_deliver_reasm_check(stcb, asoc);
1380 * This is an unfortunate routine. It checks to make sure an evil guy is not
1381 * stuffing us full of bad packet fragments. A broken peer could also do this
1382 * but this is doubtful. It is too bad I must worry about evil crackers, sigh.
/*
 * sctp_does_tsn_belong_to_reasm() - decide whether a newly arrived
 * TSN would have to join the reassembly queue.  Walks the TSN-ordered
 * reasmqueue and inspects the fragment bits of the entries that would
 * sit immediately before/after TSN_seq.
 * NOTE(review): several lines of this routine (return statements and
 * closing braces — see the gaps in the embedded line numbers) were
 * lost in extraction; the comments below describe only the code that
 * is visible here.
 */
1386 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1389 struct sctp_tmit_chunk *at;
/* Scan the (ascending-TSN) reassembly queue for the neighbours of TSN_seq. */
1392 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1393 if (compare_with_wrap(TSN_seq,
1394 at->rec.data.TSN_seq, MAX_TSN)) {
1395 /* is it one bigger? */
1396 tsn_est = at->rec.data.TSN_seq + 1;
1397 if (tsn_est == TSN_seq) {
1398 /* yep. It better be a last then */
1399 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1400 SCTP_DATA_LAST_FRAG) {
1402 * Ok this guy belongs next to a guy
1403 * that is NOT last, it should be a
1404 * middle/last, not a complete
1410 * This guy is ok since its a LAST
1411 * and the new chunk is a fully
1412 * self- contained one.
1417 } else if (TSN_seq == at->rec.data.TSN_seq) {
1418 /* Software error since I have a dup? */
1422 * Ok, 'at' is larger than new chunk but does it
1423 * need to be right before it.
1425 tsn_est = TSN_seq + 1;
/* 'at' follows the new TSN directly; it must then carry the FIRST bit. */
1426 if (tsn_est == at->rec.data.TSN_seq) {
1427 /* Yep, It better be a first */
1428 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1429 SCTP_DATA_FIRST_FRAG) {
/*
 * sctp_process_a_data_chunk() - handle one inbound DATA chunk for an
 * association: duplicate/TSN-window checks, stream-id validation,
 * mapping-array bookkeeping, then either express delivery to the
 * socket, append to an in-progress partial delivery (PD-API), or
 * queueing for stream ordering / reassembly.
 * NOTE(review): this copy of the function is missing many lines (note
 * the gaps in the embedded original line numbers — dropped braces,
 * `if (oper)` guards, declarations); comments here describe only the
 * code that is visible.
 */
1442 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1443 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1444 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1445 int *break_flag, int last_chunk)
1447 /* Process a data chunk */
1448 /* struct sctp_tmit_chunk *chk; */
1449 struct sctp_tmit_chunk *chk;
1453 int need_reasm_check = 0;
1454 uint16_t strmno, strmseq;
1456 struct sctp_queued_to_read *control;
1458 uint32_t protocol_id;
1459 uint8_t chunk_flags;
1460 struct sctp_stream_reset_list *liste;
1463 tsn = ntohl(ch->dp.tsn);
1464 chunk_flags = ch->ch.chunk_flags;
1465 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1466 asoc->send_sack = 1;
1468 protocol_id = ch->dp.protocol_id;
1469 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1470 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1471 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1476 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* TSN at or below the cumulative ack => duplicate; record it for the next SACK. */
1477 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1478 asoc->cumulative_tsn == tsn) {
1479 /* It is a duplicate */
1480 SCTP_STAT_INCR(sctps_recvdupdata);
1481 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1482 /* Record a dup for the next outbound sack */
1483 asoc->dup_tsns[asoc->numduptsns] = tsn;
1486 asoc->send_sack = 1;
1489 /* Calculate the number of TSN's between the base and this TSN */
1490 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1491 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1492 /* Can't hold the bit in the mapping at max array, toss it */
1495 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1496 SCTP_TCB_LOCK_ASSERT(stcb);
1497 if (sctp_expand_mapping_array(asoc, gap)) {
1498 /* Can't expand, drop it */
1502 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1505 /* See if we have received this one already */
1506 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1507 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1508 SCTP_STAT_INCR(sctps_recvdupdata);
1509 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1510 /* Record a dup for the next outbound sack */
1511 asoc->dup_tsns[asoc->numduptsns] = tsn;
1514 asoc->send_sack = 1;
1518 * Check to see about the GONE flag, duplicates would cause a sack
1519 * to be sent up above
1521 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1522 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1523 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1526 * wait a minute, this guy is gone, there is no longer a
1527 * receiver. Send peer an ABORT!
1529 struct mbuf *op_err;
1531 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1532 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1537 * Now before going further we see if there is room. If NOT then we
1538 * MAY let one through only IF this TSN is the one we are waiting
1539 * for on a partial delivery API.
1542 /* now do the tests */
1543 if (((asoc->cnt_on_all_streams +
1544 asoc->cnt_on_reasm_queue +
1545 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1546 (((int)asoc->my_rwnd) <= 0)) {
1548 * When we have NO room in the rwnd we check to make sure
1549 * the reader is doing its job...
1551 if (stcb->sctp_socket->so_rcv.sb_cc) {
1552 /* some to read, wake-up */
1553 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Socket lock ordering: drop TCB lock, take socket lock, re-take TCB lock. */
1556 so = SCTP_INP_SO(stcb->sctp_ep);
1557 atomic_add_int(&stcb->asoc.refcnt, 1);
1558 SCTP_TCB_UNLOCK(stcb);
1559 SCTP_SOCKET_LOCK(so, 1);
1560 SCTP_TCB_LOCK(stcb);
1561 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1562 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1563 /* assoc was freed while we were unlocked */
1564 SCTP_SOCKET_UNLOCK(so, 1);
1568 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1569 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1570 SCTP_SOCKET_UNLOCK(so, 1);
1573 /* now is it in the mapping array of what we have accepted? */
1574 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1575 compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1576 /* Nope not in the valid range dump it */
1577 sctp_set_rwnd(stcb, asoc);
1578 if ((asoc->cnt_on_all_streams +
1579 asoc->cnt_on_reasm_queue +
1580 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1581 SCTP_STAT_INCR(sctps_datadropchklmt);
1583 SCTP_STAT_INCR(sctps_datadroprwnd);
/*
 * Stream-id sanity: an out-of-range SID gets an INVALID_STREAM
 * operational error queued to the peer; the TSN is still marked
 * received (nr-mapping array) so it gets acked but never delivered.
 */
1590 strmno = ntohs(ch->dp.stream_id);
1591 if (strmno >= asoc->streamincnt) {
1592 struct sctp_paramhdr *phdr;
1595 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1596 0, M_DONTWAIT, 1, MT_DATA);
1598 /* add some space up front so prepend will work well */
1599 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1600 phdr = mtod(mb, struct sctp_paramhdr *);
1602 * Error causes are just param's and this one has
1603 * two back to back phdr, one with the error type
1604 * and size, the other with the streamid and a rsvd
1606 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1607 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1608 phdr->param_length =
1609 htons(sizeof(struct sctp_paramhdr) * 2);
1611 /* We insert the stream in the type field */
1612 phdr->param_type = ch->dp.stream_id;
1613 /* And set the length to 0 for the rsvd field */
1614 phdr->param_length = 0;
1615 sctp_queue_op_err(stcb, mb);
1617 SCTP_STAT_INCR(sctps_badsid);
1618 SCTP_TCB_LOCK_ASSERT(stcb);
1619 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1620 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1621 asoc->highest_tsn_inside_nr_map = tsn;
1623 if (tsn == (asoc->cumulative_tsn + 1)) {
1624 /* Update cum-ack */
1625 asoc->cumulative_tsn = tsn;
1630 * Before we continue lets validate that we are not being fooled by
1631 * an evil attacker. We can only have 4k chunks based on our TSN
1632 * spread allowed by the mapping array 512 * 8 bits, so there is no
1633 * way our stream sequence numbers could have wrapped. We of course
1634 * only validate the FIRST fragment so the bit must be set.
1636 strmseq = ntohs(ch->dp.stream_sequence);
1637 #ifdef SCTP_ASOCLOG_OF_TSNS
1638 SCTP_TCB_LOCK_ASSERT(stcb);
1639 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1640 asoc->tsn_in_at = 0;
1641 asoc->tsn_in_wrapped = 1;
1643 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1644 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1645 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1646 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1647 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1648 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1649 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1650 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
/* Ordered FIRST fragment whose SSN is at/behind last delivered => protocol violation. */
1653 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1654 (TAILQ_EMPTY(&asoc->resetHead)) &&
1655 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1656 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1657 strmseq, MAX_SEQ) ||
1658 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1659 /* The incoming sseq is behind where we last delivered? */
1660 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1661 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1662 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1663 0, M_DONTWAIT, 1, MT_DATA);
1665 struct sctp_paramhdr *ph;
1668 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1669 (3 * sizeof(uint32_t));
1670 ph = mtod(oper, struct sctp_paramhdr *);
1671 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1672 ph->param_length = htons(SCTP_BUF_LEN(oper));
1673 ippp = (uint32_t *) (ph + 1);
1674 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1678 *ippp = ((strmno << 16) | strmseq);
1681 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1682 sctp_abort_an_association(stcb->sctp_ep, stcb,
1683 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1687 /************************************
1688 * From here down we may find ch-> invalid
1689 * so its a good idea NOT to use it.
1690 *************************************/
/* Get a private copy of the payload (or steal the mbuf chain for the last chunk). */
1692 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1693 if (last_chunk == 0) {
1694 dmbuf = SCTP_M_COPYM(*m,
1695 (offset + sizeof(struct sctp_data_chunk)),
1696 the_len, M_DONTWAIT);
1697 #ifdef SCTP_MBUF_LOGGING
1698 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1703 if (SCTP_BUF_IS_EXTENDED(mat)) {
1704 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1706 mat = SCTP_BUF_NEXT(mat);
1711 /* We can steal the last chunk */
1715 /* lop off the top part */
1716 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1717 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1718 l_len = SCTP_BUF_LEN(dmbuf);
1721 * need to count up the size hopefully does not hit
1729 l_len += SCTP_BUF_LEN(lat);
1730 lat = SCTP_BUF_NEXT(lat);
1733 if (l_len > the_len) {
1734 /* Trim the end round bytes off too */
1735 m_adj(dmbuf, -(l_len - the_len));
1738 if (dmbuf == NULL) {
1739 SCTP_STAT_INCR(sctps_nomem);
1742 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1743 asoc->fragmented_delivery_inprogress == 0 &&
1744 TAILQ_EMPTY(&asoc->resetHead) &&
1746 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1747 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1748 /* Candidate for express delivery */
1750 * Its not fragmented, No PD-API is up, Nothing in the
1751 * delivery queue, Its un-ordered OR ordered and the next to
1752 * deliver AND nothing else is stuck on the stream queue,
1753 * And there is room for it in the socket buffer. Lets just
1754 * stuff it up the buffer....
1757 /* It would be nice to avoid this copy if we could :< */
1758 sctp_alloc_a_readq(stcb, control);
1759 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1765 if (control == NULL) {
1766 goto failed_express_del;
1768 sctp_add_to_readq(stcb->sctp_ep, stcb,
1769 control, &stcb->sctp_socket->so_rcv,
1770 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1772 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1773 /* for ordered, bump what we delivered */
1774 asoc->strmin[strmno].last_sequence_delivered++;
1776 SCTP_STAT_INCR(sctps_recvexpress);
1777 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1778 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1779 SCTP_STR_LOG_FROM_EXPRS_DEL);
1783 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1784 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1785 asoc->highest_tsn_inside_nr_map = tsn;
1787 goto finish_express_del;
1790 /* If we reach here this is a new chunk */
1793 /* Express for fragmented delivery? */
1794 if ((asoc->fragmented_delivery_inprogress) &&
1795 (stcb->asoc.control_pdapi) &&
1796 (asoc->str_of_pdapi == strmno) &&
1797 (asoc->ssn_of_pdapi == strmseq)
1799 control = stcb->asoc.control_pdapi;
1800 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1801 /* Can't be another first? */
1802 goto failed_pdapi_express_del;
1804 if (tsn == (control->sinfo_tsn + 1)) {
1805 /* Yep, we can add it on */
1809 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1812 cumack = asoc->cumulative_tsn;
1813 if ((cumack + 1) == tsn)
1816 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1818 &stcb->sctp_socket->so_rcv)) {
1819 SCTP_PRINTF("Append fails end:%d\n", end);
1820 goto failed_pdapi_express_del;
1822 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1823 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1824 asoc->highest_tsn_inside_nr_map = tsn;
1826 SCTP_STAT_INCR(sctps_recvexpressm);
1827 control->sinfo_tsn = tsn;
1828 asoc->tsn_last_delivered = tsn;
1829 asoc->fragment_flags = chunk_flags;
1830 asoc->tsn_of_pdapi_last_delivered = tsn;
1831 asoc->last_flags_delivered = chunk_flags;
1832 asoc->last_strm_seq_delivered = strmseq;
1833 asoc->last_strm_no_delivered = strmno;
1835 /* clean up the flags and such */
1836 asoc->fragmented_delivery_inprogress = 0;
1837 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1838 asoc->strmin[strmno].last_sequence_delivered++;
1840 stcb->asoc.control_pdapi = NULL;
1841 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1843 * There could be another message
1846 need_reasm_check = 1;
1850 goto finish_express_del;
1853 failed_pdapi_express_del:
/* Mark the TSN received; which map depends on the sctp_do_drain sysctl. */
1855 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1856 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1857 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1858 asoc->highest_tsn_inside_nr_map = tsn;
1861 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1862 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1863 asoc->highest_tsn_inside_map = tsn;
/*
 * Fragmented data gets a sctp_tmit_chunk for the reassembly queue;
 * complete (NOT_FRAG) messages get a readq entry instead.
 */
1866 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1867 sctp_alloc_a_chunk(stcb, chk);
1869 /* No memory so we drop the chunk */
1870 SCTP_STAT_INCR(sctps_nomem);
1871 if (last_chunk == 0) {
1872 /* we copied it, free the copy */
1873 sctp_m_freem(dmbuf);
1877 chk->rec.data.TSN_seq = tsn;
1878 chk->no_fr_allowed = 0;
1879 chk->rec.data.stream_seq = strmseq;
1880 chk->rec.data.stream_number = strmno;
1881 chk->rec.data.payloadtype = protocol_id;
1882 chk->rec.data.context = stcb->asoc.context;
1883 chk->rec.data.doing_fast_retransmit = 0;
1884 chk->rec.data.rcv_flags = chunk_flags;
1886 chk->send_size = the_len;
1888 atomic_add_int(&net->ref_count, 1);
1891 sctp_alloc_a_readq(stcb, control);
1892 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1898 if (control == NULL) {
1899 /* No memory so we drop the chunk */
1900 SCTP_STAT_INCR(sctps_nomem);
1901 if (last_chunk == 0) {
1902 /* we copied it, free the copy */
1903 sctp_m_freem(dmbuf);
1907 control->length = the_len;
1910 /* Mark it as received */
1911 /* Now queue it where it belongs */
1912 if (control != NULL) {
1913 /* First a sanity check */
1914 if (asoc->fragmented_delivery_inprogress) {
1916 * Ok, we have a fragmented delivery in progress if
1917 * this chunk is next to deliver OR belongs in our
1918 * view to the reassembly, the peer is evil or
1921 uint32_t estimate_tsn;
1923 estimate_tsn = asoc->tsn_last_delivered + 1;
1924 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1925 (estimate_tsn == control->sinfo_tsn)) {
1926 /* Evil/Broke peer */
1927 sctp_m_freem(control->data);
1928 control->data = NULL;
1929 if (control->whoFrom) {
1930 sctp_free_remote_addr(control->whoFrom);
1931 control->whoFrom = NULL;
1933 sctp_free_a_readq(stcb, control);
1934 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1935 0, M_DONTWAIT, 1, MT_DATA);
1937 struct sctp_paramhdr *ph;
1940 SCTP_BUF_LEN(oper) =
1941 sizeof(struct sctp_paramhdr) +
1942 (3 * sizeof(uint32_t));
1943 ph = mtod(oper, struct sctp_paramhdr *);
1945 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1946 ph->param_length = htons(SCTP_BUF_LEN(oper));
1947 ippp = (uint32_t *) (ph + 1);
1948 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1952 *ippp = ((strmno << 16) | strmseq);
1954 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1955 sctp_abort_an_association(stcb->sctp_ep, stcb,
1956 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1961 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1962 sctp_m_freem(control->data);
1963 control->data = NULL;
1964 if (control->whoFrom) {
1965 sctp_free_remote_addr(control->whoFrom);
1966 control->whoFrom = NULL;
1968 sctp_free_a_readq(stcb, control);
1970 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1971 0, M_DONTWAIT, 1, MT_DATA);
1973 struct sctp_paramhdr *ph;
1976 SCTP_BUF_LEN(oper) =
1977 sizeof(struct sctp_paramhdr) +
1978 (3 * sizeof(uint32_t));
1980 struct sctp_paramhdr *);
1982 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1984 htons(SCTP_BUF_LEN(oper));
1985 ippp = (uint32_t *) (ph + 1);
1986 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1990 *ippp = ((strmno << 16) | strmseq);
1992 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1993 sctp_abort_an_association(stcb->sctp_ep,
1994 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2001 /* No PDAPI running */
2002 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2004 * Reassembly queue is NOT empty validate
2005 * that this tsn does not need to be in
2006 * reasembly queue. If it does then our peer
2007 * is broken or evil.
2009 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2010 sctp_m_freem(control->data);
2011 control->data = NULL;
2012 if (control->whoFrom) {
2013 sctp_free_remote_addr(control->whoFrom);
2014 control->whoFrom = NULL;
2016 sctp_free_a_readq(stcb, control);
2017 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2018 0, M_DONTWAIT, 1, MT_DATA);
2020 struct sctp_paramhdr *ph;
2023 SCTP_BUF_LEN(oper) =
2024 sizeof(struct sctp_paramhdr) +
2025 (3 * sizeof(uint32_t));
2027 struct sctp_paramhdr *);
2029 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2031 htons(SCTP_BUF_LEN(oper));
2032 ippp = (uint32_t *) (ph + 1);
2033 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2037 *ippp = ((strmno << 16) | strmseq);
2039 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2040 sctp_abort_an_association(stcb->sctp_ep,
2041 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2048 /* ok, if we reach here we have passed the sanity checks */
2049 if (chunk_flags & SCTP_DATA_UNORDERED) {
2050 /* queue directly into socket buffer */
2051 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2052 sctp_add_to_readq(stcb->sctp_ep, stcb,
2054 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2057 * Special check for when streams are resetting. We
2058 * could be more smart about this and check the
2059 * actual stream to see if it is not being reset..
2060 * that way we would not create a HOLB when amongst
2061 * streams being reset and those not being reset.
2063 * We take complete messages that have a stream reset
2064 * intervening (aka the TSN is after where our
2065 * cum-ack needs to be) off and put them on a
2066 * pending_reply_queue. The reassembly ones we do
2067 * not have to worry about since they are all sorted
2068 * and proceessed by TSN order. It is only the
2069 * singletons I must worry about.
2071 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2072 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2075 * yep its past where we need to reset... go
2076 * ahead and queue it.
2078 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2080 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2082 struct sctp_queued_to_read *ctlOn;
2083 unsigned char inserted = 0;
/* Keep pending_reply_queue sorted by TSN: walk to the first larger entry. */
2085 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2087 if (compare_with_wrap(control->sinfo_tsn,
2088 ctlOn->sinfo_tsn, MAX_TSN)) {
2089 ctlOn = TAILQ_NEXT(ctlOn, next);
2092 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2097 if (inserted == 0) {
2099 * must be put at end, use
2100 * prevP (all setup from
2101 * loop) to setup nextP.
2103 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2107 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2114 /* Into the re-assembly queue */
2115 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2118 * the assoc is now gone and chk was put onto the
2119 * reasm queue, which has all been freed.
2126 if (tsn == (asoc->cumulative_tsn + 1)) {
2127 /* Update cum-ack */
2128 asoc->cumulative_tsn = tsn;
2134 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2136 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2138 SCTP_STAT_INCR(sctps_recvdata);
2139 /* Set it present please */
2140 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2141 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2143 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2144 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2145 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2147 /* check the special flag for stream resets */
2148 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2149 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2150 (asoc->cumulative_tsn == liste->tsn))
2153 * we have finished working through the backlogged TSN's now
2154 * time to reset streams. 1: call reset function. 2: free
2155 * pending_reply space 3: distribute any chunks in
2156 * pending_reply_queue.
2158 struct sctp_queued_to_read *ctl;
2160 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2161 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2162 SCTP_FREE(liste, SCTP_M_STRESET);
2163 /* sa_ignore FREED_MEMORY */
2164 liste = TAILQ_FIRST(&asoc->resetHead);
2165 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2166 if (ctl && (liste == NULL)) {
2167 /* All can be removed */
2169 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2170 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2174 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2177 /* more than one in queue */
2178 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2180 * if ctl->sinfo_tsn is <= liste->tsn we can
2181 * process it which is the NOT of
2182 * ctl->sinfo_tsn > liste->tsn
2184 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2185 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2189 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2193 * Now service re-assembly to pick up anything that has been
2194 * held on reassembly queue?
2196 sctp_deliver_reasm_check(stcb, asoc);
2197 need_reasm_check = 0;
2199 if (need_reasm_check) {
2200 /* Another one waits ? */
2201 sctp_deliver_reasm_check(stcb, asoc);
/*
 * Lookup table used when sliding the mapping arrays: for a byte value
 * b, sctp_map_lookup_tab[b] is the number of consecutive one-bits
 * starting at the least-significant bit (trailing ones).  A fully-set
 * byte (0xff) yields 8.  sctp_slide_mapping_arrays() uses this to find
 * how far the cumulative-TSN point advances inside a partially filled
 * mapping-array byte.
 * Fix: this copy of the table was missing its closing "};" terminator;
 * the definition is restored complete (values verified to match the
 * trailing-ones pattern, e.g. tab[0x03]=2, tab[0x7f]=7, tab[0xff]=8).
 */
int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 8
};
/*
 * sctp_slide_mapping_arrays() -- advance ("slide") the association's TSN
 * mapping arrays after the cumulative-ack point has moved, so the arrays
 * keep tracking only TSNs at or above the new cum-ack.  Works on both
 * asoc->mapping_array and asoc->nr_mapping_array in lockstep.
 *
 * NOTE(review): this listing embeds original file line numbers and has
 * gaps (e.g. 2244-2245, 2258-2261); several statements and closing braces
 * are not visible here, so the fragment is not compilable as-is.
 */
2243 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2246 * Now we also need to check the mapping array in a couple of ways.
2247 * 1) Did we move the cum-ack point?
2249 struct sctp_association *asoc;
2251 int slide_from, slide_end, lgap, distance;
2253 /* EY nr_mapping array variables */
2255 /* int nr_last_all_ones = 0; */
2256 /* int nr_slide_from, nr_slide_end, nr_lgap, nr_distance; */
2257 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state; used later for SCTP_MAP_PREPARE_SLIDE logging. */
2262 old_cumack = asoc->cumulative_tsn;
2263 old_base = asoc->mapping_array_base_tsn;
2264 old_highest = asoc->highest_tsn_inside_map;
2266 * We could probably improve this a small bit by calculating the
2267 * offset of the current cum-ack as the starting point.
/*
 * Scan forward over whole bytes of nr_mapping_array; each all-ones byte
 * (0xff) represents 8 consecutively received TSNs.  The first byte that
 * is not 0xff contains the first missing TSN; sctp_map_lookup_tab gives
 * the count of leading set bits in that byte.  `at` (declared in a
 * missing line) accumulates the count of in-order TSNs.
 */
2270 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2271 if (asoc->nr_mapping_array[slide_from] == 0xff) {
2274 /* there is a 0 bit */
2275 at += sctp_map_lookup_tab[asoc->nr_mapping_array[slide_from]];
/* New cum-ack = base TSN plus the number of contiguously received TSNs, minus one. */
2279 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity check: cum-ack must never exceed both highest-tsn trackers. */
2281 if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2282 compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2284 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2285 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2287 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2288 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2289 sctp_print_mapping_array(asoc);
2290 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2291 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/* Non-panic recovery path: clamp both highest-tsn trackers to the cum-ack. */
2293 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2294 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the two trackers (wrap-aware compare). */
2297 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2298 asoc->highest_tsn_inside_map,
2300 highest_tsn = asoc->highest_tsn_inside_nr_map;
2302 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything outstanding has been received (cum-ack caught up to
 * the highest TSN seen) and we have at least one whole byte to reclaim:
 * simply zero the used portion of both arrays and rebase.
 */
2304 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2305 /* The complete array was completed by a single FR */
2306 /* highest becomes the cum-ack */
2314 /* clear the array */
2315 clr = ((at + 7) >> 3);
2316 if (clr > asoc->mapping_array_size) {
2317 clr = asoc->mapping_array_size;
2319 memset(asoc->mapping_array, 0, clr);
2320 memset(asoc->nr_mapping_array, 0, clr);
/* Debug verification (presumably under INVARIANTS -- surrounding #ifdef lines are missing). */
2322 for (i = 0; i < asoc->mapping_array_size; i++) {
2323 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2324 printf("Error Mapping array's not clean at clear\n");
2325 sctp_print_mapping_array(asoc);
2329 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2330 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/*
 * Case 2: at least one whole byte of contiguous TSNs, but data beyond
 * the cum-ack still exists -- shift (memmove-by-loop) the live bytes of
 * both arrays down by slide_from bytes.
 */
2331 } else if (at >= 8) {
2332 /* we can slide the mapping array down */
2333 /* slide_from holds where we hit the first NON 0xff byte */
2336 * now calculate the ceiling of the move using our highest
2339 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2340 slide_end = (lgap >> 3);
2341 if (slide_end < slide_from) {
2342 sctp_print_mapping_array(asoc);
2344 panic("impossible slide");
2346 printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2347 lgap, slide_end, slide_from, at);
2351 if (slide_end > asoc->mapping_array_size) {
2353 panic("would overrun buffer");
2355 printf("Gak, would have overrun map end:%d slide_end:%d\n",
2356 asoc->mapping_array_size, slide_end);
2357 slide_end = asoc->mapping_array_size;
2360 distance = (slide_end - slide_from) + 1;
2361 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2362 sctp_log_map(old_base, old_cumack, old_highest,
2363 SCTP_MAP_PREPARE_SLIDE);
2364 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2365 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/* If the live span would not fit after the slide, skip sliding this round. */
2367 if (distance + slide_from > asoc->mapping_array_size ||
2370 * Here we do NOT slide forward the array so that
2371 * hopefully when more data comes in to fill it up
2372 * we will be able to slide it forward. Really I
2373 * don't think this should happen :-0
2376 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2377 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2378 (uint32_t) asoc->mapping_array_size,
2379 SCTP_MAP_SLIDE_NONE);
/* Copy live bytes down to index 0, then zero the vacated tail. */
2384 for (ii = 0; ii < distance; ii++) {
2385 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2386 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2389 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2390 asoc->mapping_array[ii] = 0;
2391 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the "empty map" invariant (highest == base - 1) intact by moving
 * the trackers along with the base; base advances 8 TSNs per byte slid.
 */
2393 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2394 asoc->highest_tsn_inside_map += (slide_from << 3);
2396 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2397 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2399 asoc->mapping_array_base_tsn += (slide_from << 3);
2400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2401 sctp_log_map(asoc->mapping_array_base_tsn,
2402 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2403 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() -- decide whether to send a SACK immediately or to
 * (re)start the delayed-ack timer, based on gaps, duplicates, the
 * delayed-ack setting and the CMT-DAC policy.
 *
 * @param was_a_gap  non-zero if a gap existed before the current packet
 *                   was processed (caller computed this earlier).
 * @param abort_flag out-parameter; not visibly touched in the lines shown
 *                   here -- presumably used in the missing lines; verify.
 *
 * NOTE(review): line-number gaps (2412, 2415-2416, 2441-2443, ...) mean
 * some statements/braces of this function are not visible in this listing.
 */
2411 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2413 struct sctp_association *asoc;
2414 uint32_t highest_tsn;
/* highest_tsn = max of the renege-able and non-renege-able trackers. */
2417 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2418 asoc->highest_tsn_inside_map,
2420 highest_tsn = asoc->highest_tsn_inside_nr_map;
2422 highest_tsn = asoc->highest_tsn_inside_map;
2426 * Now we need to see if we need to queue a sack or just start the
2427 * timer (if allowed).
/*
 * Special case SHUTDOWN-SENT: stop any pending delayed-ack timer and
 * emit a SHUTDOWN plus an immediate SACK instead of delaying.
 */
2429 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2431 * Ok special case, in SHUTDOWN-SENT case. here we maker
2432 * sure SACK timer is off and instead send a SHUTDOWN and a
2435 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2436 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2437 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2439 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2440 sctp_send_sack(stcb);
2444 /* is there a gap now ? */
2445 is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2448 * CMT DAC algorithm: increase number of packets received
2451 stcb->asoc.cmt_dac_pkts_rcvd++;
/*
 * Immediate-SACK conditions: an explicit send_sack request, a gap that
 * just closed, duplicate TSNs reported, a still-open gap, delayed-ack
 * disabled, or the per-SACK packet-frequency limit reached.
 */
2453 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2455 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2457 (stcb->asoc.numduptsns) || /* we have dup's */
2458 (is_a_gap) || /* is still a gap */
2459 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2460 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * CMT-DAC exception: even though an immediate-SACK condition matched,
 * delay the ack anyway when CMT+DAC is enabled and the only trigger was
 * a gap report (no explicit request, no dups, delayed-ack on, timer idle).
 */
2463 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2464 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2465 (stcb->asoc.send_sack == 0) &&
2466 (stcb->asoc.numduptsns == 0) &&
2467 (stcb->asoc.delayed_ack) &&
2468 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2471 * CMT DAC algorithm: With CMT, delay acks
2472 * even in the face of
2474 * reordering. Therefore, if acks that do not
2475 * have to be sent because of the above
2476 * reasons, will be delayed. That is, acks
2477 * that would have been sent due to gap
2478 * reports will be delayed with DAC. Start
2479 * the delayed ack timer.
2481 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2482 stcb->sctp_ep, stcb, NULL);
2485 * Ok we must build a SACK since the timer
2486 * is pending, we got our first packet OR
2487 * there are gaps or duplicates.
2489 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2490 sctp_send_sack(stcb);
/* No immediate-SACK condition: just arm the delayed-ack timer if idle. */
2493 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2494 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2495 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() -- drive delivery from the reassembly queue.
 * If a partial-delivery (PD-API) is already in progress, continue it;
 * otherwise check whether the first fragment chain on the reassembly
 * queue can begin delivery (complete message, or enough data to exceed
 * the partial-delivery point).
 *
 * NOTE(review): this listing has line-number gaps (2503, 2506-2507,
 * 2510, ...); early-return statements and closing braces fall in the
 * missing lines.
 */
2502 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2504 struct sctp_tmit_chunk *chk;
2505 uint32_t tsize, pd_point;
2508 if (asoc->fragmented_delivery_inprogress) {
2509 sctp_service_reassembly(stcb, asoc);
2511 /* Can we proceed further, i.e. the PD-API is complete */
2512 if (asoc->fragmented_delivery_inprogress) {
2517 * Now is there some other chunk I can deliver from the reassembly
/* Empty reassembly queue: reset its accounting and bail (body of branch partly missing). */
2521 chk = TAILQ_FIRST(&asoc->reasmqueue);
2523 asoc->size_on_reasm_queue = 0;
2524 asoc->cnt_on_reasm_queue = 0;
/* Deliverable only if chk is a FIRST_FRAG that is next in stream order or unordered. */
2527 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2528 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2529 ((nxt_todel == chk->rec.data.stream_seq) ||
2530 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2532 * Yep the first one is here. We setup to start reception,
2533 * by backing down the TSN just in case we can't deliver.
2537 * Before we start though either all of the message should
2538 * be here or the socket buffer max or nothing on the
2539 * delivery queue and something can be delivered.
/* PD point = min(receive socket-buffer limit, endpoint partial_delivery_point). */
2541 if (stcb->sctp_socket) {
2542 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2543 stcb->sctp_ep->partial_delivery_point);
2545 pd_point = stcb->sctp_ep->partial_delivery_point;
2547 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Record the PD-API context (stream, ssn, ppid, flags) then start delivering. */
2548 asoc->fragmented_delivery_inprogress = 1;
2549 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2550 asoc->str_of_pdapi = chk->rec.data.stream_number;
2551 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2552 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2553 asoc->fragment_flags = chk->rec.data.rcv_flags;
2554 sctp_service_reassembly(stcb, asoc);
2555 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() -- walk all chunks in a received packet, hand each
 * DATA chunk to sctp_process_a_data_chunk(), and then update rwnd,
 * liveness bookkeeping, the reassembly queue, and SACK scheduling.
 *
 * @param mm       in/out: the packet mbuf chain (may be replaced below
 *                 when a small packet is copied out of a full cluster).
 * @param offset   in/out: running parse offset into the packet.
 * @param high_tsn out: highest TSN processed (seeded with cumulative_tsn).
 *
 * NOTE(review): the listing has many line-number gaps (2564-2566 headers
 * are partial, 2570, 2573-2575, ...); declarations such as `m`, `to`,
 * `from`, `stop_proc` and several braces live in the missing lines.
 */
2563 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2564 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2565 struct sctp_nets *net, uint32_t * high_tsn)
2567 struct sctp_data_chunk *ch, chunk_buf;
2568 struct sctp_association *asoc;
2569 int num_chunks = 0; /* number of control chunks processed */
2571 int chk_length, break_flag, last_chunk;
2572 int abort_flag = 0, was_a_gap = 0;
2576 sctp_set_rwnd(stcb, &stcb->asoc);
2579 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap already existed before this packet (for sctp_sack_check). */
2581 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2582 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2583 /* there was a gap before this data was processed */
2587 * setup where we got the last DATA packet from for any SACK that
2588 * may need to go out. Don't bump the net. This is done ONLY when a
2589 * chunk is assigned.
2591 asoc->last_data_chunk_from = net;
2594 * Now before we proceed we must figure out if this is a wasted
2595 * cluster... i.e. it is a small packet sent in and yet the driver
2596 * underneath allocated a full cluster for it. If so we must copy it
2597 * to a smaller mbuf and free up the cluster mbuf. This will help
2598 * with cluster starvation. Note for __Panda__ we don't do this
2599 * since it has clusters all the way down to 64 bytes.
2601 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2602 /* we only handle mbufs that are singletons.. not chains */
2603 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2605 /* ok lets see if we can copy the data up */
2608 /* get the pointers and copy */
2609 to = mtod(m, caddr_t *);
2610 from = mtod((*mm), caddr_t *);
2611 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2612 /* copy the length and free up the old */
2613 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2615 /* sucess, back copy */
2618 /* We are in trouble in the mbuf world .. yikes */
2622 /* get pointer to the first chunk header */
2623 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2624 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2629 * process all DATA chunks...
2631 *high_tsn = asoc->cumulative_tsn;
2633 asoc->data_pkts_seen++;
/* Main chunk-walking loop; stop_proc is set when parsing must end. */
2634 while (stop_proc == 0) {
2635 /* validate chunk length */
2636 chk_length = ntohs(ch->ch.chunk_length);
2637 if (length - *offset < chk_length) {
2638 /* all done, mutulated chunk */
2642 if (ch->ch.chunk_type == SCTP_DATA) {
/* A DATA chunk must carry at least one byte of payload past its header. */
2643 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2645 * Need to send an abort since we had a
2646 * invalid data chunk.
2648 struct mbuf *op_err;
/* Build a PROTOCOL_VIOLATION error cause: paramhdr + two 32-bit words
 * (location code and, in a missing line's branch, the cumulative TSN). */
2650 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2651 0, M_DONTWAIT, 1, MT_DATA);
2654 struct sctp_paramhdr *ph;
2657 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2658 (2 * sizeof(uint32_t));
2659 ph = mtod(op_err, struct sctp_paramhdr *);
2661 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2662 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2663 ippp = (uint32_t *) (ph + 1);
2664 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2666 *ippp = asoc->cumulative_tsn;
2669 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2670 sctp_abort_association(inp, stcb, m, iphlen, sh,
2671 op_err, 0, net->port);
2674 #ifdef SCTP_AUDITING_ENABLED
2675 sctp_audit_log(0xB1, 0);
/* last_chunk: true when this chunk's padded length exactly ends the packet. */
2677 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2682 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2683 chk_length, net, high_tsn, &abort_flag, &break_flag,
2692 * Set because of out of rwnd space and no
2693 * drop rep space left.
2699 /* not a data chunk in the data region */
2700 switch (ch->ch.chunk_type) {
2701 case SCTP_INITIATION:
2702 case SCTP_INITIATION_ACK:
2703 case SCTP_SELECTIVE_ACK:
2704 case SCTP_NR_SELECTIVE_ACK: /* EY */
2705 case SCTP_HEARTBEAT_REQUEST:
2706 case SCTP_HEARTBEAT_ACK:
2707 case SCTP_ABORT_ASSOCIATION:
2709 case SCTP_SHUTDOWN_ACK:
2710 case SCTP_OPERATION_ERROR:
2711 case SCTP_COOKIE_ECHO:
2712 case SCTP_COOKIE_ACK:
2715 case SCTP_SHUTDOWN_COMPLETE:
2716 case SCTP_AUTHENTICATION:
2717 case SCTP_ASCONF_ACK:
2718 case SCTP_PACKET_DROPPED:
2719 case SCTP_STREAM_RESET:
2720 case SCTP_FORWARD_CUM_TSN:
2723 * Now, what do we do with KNOWN chunks that
2724 * are NOT in the right place?
2726 * For now, I do nothing but ignore them. We
2727 * may later want to add sysctl stuff to
2728 * switch out and do either an ABORT() or
2729 * possibly process them.
2731 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2732 struct mbuf *op_err;
2734 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2735 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2740 /* unknown chunk type, use bit rules */
/* Upper bits of the chunk type: 0x40 = report error, 0x80 = skip-and-continue. */
2741 if (ch->ch.chunk_type & 0x40) {
2742 /* Add a error report to the queue */
2744 struct sctp_paramhdr *phd;
2746 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2748 phd = mtod(merr, struct sctp_paramhdr *);
2750 * We cheat and use param
2751 * type since we did not
2752 * bother to define a error
2753 * cause struct. They are
2754 * the same basic format
2755 * with different names.
2758 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2760 htons(chk_length + sizeof(*phd));
2761 SCTP_BUF_LEN(merr) = sizeof(*phd);
2762 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2763 SCTP_SIZE32(chk_length),
2765 if (SCTP_BUF_NEXT(merr)) {
2766 sctp_queue_op_err(stcb, merr);
2772 if ((ch->ch.chunk_type & 0x80) == 0) {
2773 /* discard the rest of this packet */
2775 } /* else skip this bad chunk and
2778 }; /* switch of chunk type */
/* Advance to the next 32-bit-aligned chunk, or finish the packet. */
2780 *offset += SCTP_SIZE32(chk_length);
2781 if ((*offset >= length) || stop_proc) {
2782 /* no more data left in the mbuf chain */
2786 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2787 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2797 * we need to report rwnd overrun drops.
2799 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2803 * Did we get data, if so update the time for auto-close and
2804 * give peer credit for being alive.
2806 SCTP_STAT_INCR(sctps_recvpktwithdata);
2807 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2808 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2809 stcb->asoc.overall_error_count,
2811 SCTP_FROM_SCTP_INDATA,
/* Receiving data proves the peer is alive: clear error count, stamp time. */
2814 stcb->asoc.overall_error_count = 0;
2815 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2817 /* now service all of the reassm queue if needed */
2818 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2819 sctp_service_queues(stcb, asoc);
2821 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2822 /* Assure that we ack right away */
2823 stcb->asoc.send_sack = 1;
2825 /* Start a sack timer or QUEUE a SACK for sending */
2826 sctp_sack_check(stcb, was_a_gap, &abort_flag);
/*
 * sctp_process_segment_range() -- mark every chunk on the sent queue whose
 * TSN falls inside one SACK gap-ack block [last_tsn+frag_strt,
 * last_tsn+frag_end] as acked (SCTP_DATAGRAM_MARKED), updating flight
 * size, RTO samples, CMT (pseudo-)cumack trackers and ECN nonce sums.
 *
 * @param p_tp1      in/out cursor into the sent queue, so consecutive gap
 *                   blocks resume where the previous one stopped.
 * @param nr_sacking non-zero for NR-SACK processing (used in missing lines;
 *                   the "NR Sack code here" section frees acked data).
 * @return wake_him  -- "Return value only used for nr-sack" (see 3052).
 *
 * NOTE(review): line-number gaps (2836, 2844, 2846-2847, ...) hide several
 * declarations and braces of this function.
 */
2834 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2835 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2837 uint32_t * biggest_newly_acked_tsn,
2838 uint32_t * this_sack_lowest_newack,
2841 struct sctp_tmit_chunk *tp1;
2842 unsigned int theTSN;
2843 int j, wake_him = 0, circled = 0;
2845 /* Recover the tp1 we last saw */
2848 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Outer loop: one iteration per TSN inside the gap block. */
2850 for (j = frag_strt; j <= frag_end; j++) {
2851 theTSN = j + last_tsn;
2853 if (tp1->rec.data.doing_fast_retransmit)
2857 * CMT: CUCv2 algorithm. For each TSN being
2858 * processed from the sent queue, track the
2859 * next expected pseudo-cumack, or
2860 * rtx_pseudo_cumack, if required. Separate
2861 * cumack trackers for first transmissions,
2862 * and retransmissions.
2864 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2865 (tp1->snd_count == 1)) {
2866 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2867 tp1->whoTo->find_pseudo_cumack = 0;
2869 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2870 (tp1->snd_count > 1)) {
2871 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2872 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk whose TSN matches this gap-block entry. */
2874 if (tp1->rec.data.TSN_seq == theTSN) {
2875 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2877 * must be held until
2881 * ECN Nonce: Add the nonce
2882 * value to the sender's
/*
 * First-time ack (state below RESEND): track newly-acked TSN highs/lows
 * for the SFR/HTNA/DAC algorithms and remove the chunk from flight.
 */
2885 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2887 * If it is less than RESEND, it is
2888 * now no-longer in flight.
2889 * Higher values may already be set
2890 * via previous Gap Ack Blocks...
2891 * i.e. ACKED or RESEND.
2893 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2894 *biggest_newly_acked_tsn, MAX_TSN)) {
2895 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2898 * CMT: SFR algo (and HTNA) - set
2899 * saw_newack to 1 for dest being
2900 * newly acked. update
2901 * this_sack_highest_newack if
2904 if (tp1->rec.data.chunk_was_revoked == 0)
2905 tp1->whoTo->saw_newack = 1;
2907 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2908 tp1->whoTo->this_sack_highest_newack,
2910 tp1->whoTo->this_sack_highest_newack =
2911 tp1->rec.data.TSN_seq;
2914 * CMT DAC algo: also update
2915 * this_sack_lowest_newack
2917 if (*this_sack_lowest_newack == 0) {
2918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2919 sctp_log_sack(*this_sack_lowest_newack,
2921 tp1->rec.data.TSN_seq,
2924 SCTP_LOG_TSN_ACKED);
2926 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2929 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2930 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2931 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2932 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2933 * Separate pseudo_cumack trackers for first transmissions and
2936 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2937 if (tp1->rec.data.chunk_was_revoked == 0) {
2938 tp1->whoTo->new_pseudo_cumack = 1;
2940 tp1->whoTo->find_pseudo_cumack = 1;
2942 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2943 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2945 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2946 if (tp1->rec.data.chunk_was_revoked == 0) {
2947 tp1->whoTo->new_pseudo_cumack = 1;
2949 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2951 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2952 sctp_log_sack(*biggest_newly_acked_tsn,
2954 tp1->rec.data.TSN_seq,
2957 SCTP_LOG_TSN_ACKED);
2959 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2960 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2961 tp1->whoTo->flight_size,
2963 (uintptr_t) tp1->whoTo,
2964 tp1->rec.data.TSN_seq);
/* Chunk leaves flight: shrink per-net and total flight accounting. */
2966 sctp_flight_size_decrease(tp1);
2967 sctp_total_flight_decrease(stcb, tp1);
2969 tp1->whoTo->net_ack += tp1->send_size;
2970 if (tp1->snd_count < 2) {
2972 * True non-retransmited chunk
2974 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample -- only valid for never-retransmitted chunks (Karn's rule;
 * the guarding condition is in missing lines -- verify against full file). */
2981 sctp_calculate_rto(stcb,
2984 &tp1->sent_rcv_time,
2985 sctp_align_safe_nocopy);
2990 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2991 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2992 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2993 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2994 stcb->asoc.this_sack_highest_gap,
2996 stcb->asoc.this_sack_highest_gap =
2997 tp1->rec.data.TSN_seq;
2999 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3000 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3001 #ifdef SCTP_AUDITING_ENABLED
3002 sctp_audit_log(0xB2,
3003 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3008 * All chunks NOT UNSENT fall through here and are marked
3009 * (leave PR-SCTP ones that are to skip alone though)
3011 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3012 tp1->sent = SCTP_DATAGRAM_MARKED;
3014 if (tp1->rec.data.chunk_was_revoked) {
3015 /* deflate the cwnd */
3016 tp1->whoTo->cwnd -= tp1->book_size;
3017 tp1->rec.data.chunk_was_revoked = 0;
3019 /* NR Sack code here */
/* NR-SACK path: release the acked chunk's data (buffer-space accounting + free). */
3026 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3027 sctp_m_freem(tp1->data);
3034 } /* if (tp1->TSN_seq == theTSN) */
3035 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
/* Walk the sent queue; if we fall off the end, wrap to the head once
 * per gap block (circled guards against looping forever). */
3039 tp1 = TAILQ_NEXT(tp1, sctp_next);
3040 if ((tp1 == NULL) && (circled == 0)) {
3042 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3044 } /* end while (tp1) */
3047 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3049 /* In case the fragments were not in order we must reset */
3050 } /* end for (j = fragStart */
3052 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() -- iterate over the (num_seg + num_nr_seg)
 * gap-ack blocks in a SACK/NR-SACK chunk, sanity-check each, track the
 * biggest TSN acked, and hand each block to sctp_process_segment_range()
 * for per-chunk marking.
 *
 * @return chunk_freed -- set (in lines visible at 3131-3133) when
 *         sctp_process_segment_range() reports it freed chunk data.
 *
 * NOTE(review): line-number gaps (3061, 3064-3067, 3070-3074, ...) hide
 * some declarations (e.g. chunk_freed, non_revocable) and braces.
 */
3057 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3058 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3059 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3060 int num_seg, int num_nr_seg, int *ecn_seg_sums)
3062 struct sctp_gap_ack_block *frag, block;
3063 struct sctp_tmit_chunk *tp1;
3068 uint16_t frag_strt, frag_end;
3069 uint32_t last_frag_high;
/* One iteration per gap-ack block carried in the SACK. */
3075 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3076 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3077 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3078 *offset += sizeof(block);
/* mbuf ran short: bail with whatever we have processed so far. */
3080 return (chunk_freed);
3082 frag_strt = ntohs(frag->start);
3083 frag_end = ntohs(frag->end);
3084 /* some sanity checks on the fragment offsets */
3085 if (frag_strt > frag_end) {
3086 /* this one is malformed, skip */
3089 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3091 *biggest_tsn_acked = frag_end + last_tsn;
3093 /* mark acked dgs and find out the highestTSN being acked */
/* First block: start scanning the sent queue from the head. */
3095 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3096 /* save the locations of the last frags */
3097 last_frag_high = frag_end + last_tsn;
3100 * now lets see if we need to reset the queue due to
3101 * a out-of-order SACK fragment
3103 if (compare_with_wrap(frag_strt + last_tsn,
3104 last_frag_high, MAX_TSN)) {
3106 * if the new frag starts after the last TSN
3107 * frag covered, we are ok and this one is
3108 * beyond the last one
3113 * ok, they have reset us, so we need to
3114 * reset the queue this will cause extra
3115 * hunting but hey, they chose the
3116 * performance hit when they failed to order
3119 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3121 last_frag_high = frag_end + last_tsn;
/* Process this block; a non-zero return means chunk data was freed. */
3131 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3132 non_revocable, &num_frs, biggest_newly_acked_tsn,
3133 this_sack_lowest_newack, ecn_seg_sums)) {
3137 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3139 sctp_log_fr(*biggest_tsn_acked,
3140 *biggest_newly_acked_tsn,
3141 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3143 return (chunk_freed);
/*
 * sctp_check_for_revoked() -- after SACK processing, walk the sent queue
 * between cumack and biggest_tsn_acked looking for chunks that were
 * previously ACKED but are no longer covered by this SACK ("revoked").
 * Revoked chunks go back to SENT state and are re-added to the flight;
 * chunks re-acked this round (MARKED) are promoted to ACKED.
 *
 * NOTE(review): line-number gaps (3150, 3153, 3155, ...) hide the loop
 * header and some braces of this function.
 */
3147 sctp_check_for_revoked(struct sctp_tcb *stcb,
3148 struct sctp_association *asoc, uint32_t cumack,
3149 uint32_t biggest_tsn_acked)
3151 struct sctp_tmit_chunk *tp1;
3152 int tot_revoked = 0;
3154 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only chunks strictly above the cum-ack can be revoked. */
3156 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3159 * ok this guy is either ACK or MARKED. If it is
3160 * ACKED it has been previously acked but not this
3161 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Past the highest TSN this SACK covers: nothing further can be judged. */
3164 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3169 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3170 /* it has been revoked */
3171 tp1->sent = SCTP_DATAGRAM_SENT;
3172 tp1->rec.data.chunk_was_revoked = 1;
3174 * We must add this stuff back in to assure
3175 * timers and such get started.
3177 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3178 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3179 tp1->whoTo->flight_size,
3181 (uintptr_t) tp1->whoTo,
3182 tp1->rec.data.TSN_seq);
/* Revoked chunk is in flight again: restore flight accounting. */
3184 sctp_flight_size_increase(tp1);
3185 sctp_total_flight_increase(stcb, tp1);
3187 * We inflate the cwnd to compensate for our
3188 * artificial inflation of the flight_size.
3190 tp1->whoTo->cwnd += tp1->book_size;
3192 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3193 sctp_log_sack(asoc->last_acked_seq,
3195 tp1->rec.data.TSN_seq,
3198 SCTP_LOG_TSN_REVOKED);
3200 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3201 /* it has been re-acked in this SACK */
3202 tp1->sent = SCTP_DATAGRAM_ACKED;
3205 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3207 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* If anything was revoked, re-establish the ECN nonce sync point. */
3209 if (tot_revoked > 0) {
3211 * Setup the ecn nonce re-sync point. We do this since once
3212 * data is revoked we begin to retransmit things, which do
3213 * NOT have the ECN bits set. This means we are now out of
3214 * sync and must wait until we get back in sync with the
3215 * peer to check ECN bits.
3217 tp1 = TAILQ_FIRST(&asoc->send_queue);
3219 asoc->nonce_resync_tsn = asoc->sending_seq;
3221 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3223 asoc->nonce_wait_for_ecne = 0;
3224 asoc->nonce_sum_check = 0;
3230 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3231 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3233 struct sctp_tmit_chunk *tp1;
3234 int strike_flag = 0;
3236 int tot_retrans = 0;
3237 uint32_t sending_seq;
3238 struct sctp_nets *net;
3239 int num_dests_sacked = 0;
3242 * select the sending_seq, this is either the next thing ready to be
3243 * sent but not transmitted, OR, the next seq we assign.
3245 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3247 sending_seq = asoc->sending_seq;
3249 sending_seq = tp1->rec.data.TSN_seq;
3252 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3253 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3254 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3255 if (net->saw_newack)
3259 if (stcb->asoc.peer_supports_prsctp) {
3260 (void)SCTP_GETTIME_TIMEVAL(&now);
3262 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3265 if (tp1->no_fr_allowed) {
3266 /* this one had a timeout or something */
3267 tp1 = TAILQ_NEXT(tp1, sctp_next);
3270 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3271 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3272 sctp_log_fr(biggest_tsn_newly_acked,
3273 tp1->rec.data.TSN_seq,
3275 SCTP_FR_LOG_CHECK_STRIKE);
3277 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3279 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3283 if (stcb->asoc.peer_supports_prsctp) {
3284 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3285 /* Is it expired? */
3288 * TODO sctp_constants.h needs alternative
3289 * time macros when _KERNEL is undefined.
3291 (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3293 /* Yes so drop it */
3294 if (tp1->data != NULL) {
3295 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3296 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3297 SCTP_SO_NOT_LOCKED);
3299 tp1 = TAILQ_NEXT(tp1, sctp_next);
3304 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3305 asoc->this_sack_highest_gap, MAX_TSN)) {
3306 /* we are beyond the tsn in the sack */
3309 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3310 /* either a RESEND, ACKED, or MARKED */
3312 tp1 = TAILQ_NEXT(tp1, sctp_next);
3316 * CMT : SFR algo (covers part of DAC and HTNA as well)
3318 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3320 * No new acks were receieved for data sent to this
3321 * dest. Therefore, according to the SFR algo for
3322 * CMT, no data sent to this dest can be marked for
3323 * FR using this SACK.
3325 tp1 = TAILQ_NEXT(tp1, sctp_next);
3327 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3328 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3330 * CMT: New acks were receieved for data sent to
3331 * this dest. But no new acks were seen for data
3332 * sent after tp1. Therefore, according to the SFR
3333 * algo for CMT, tp1 cannot be marked for FR using
3334 * this SACK. This step covers part of the DAC algo
3335 * and the HTNA algo as well.
3337 tp1 = TAILQ_NEXT(tp1, sctp_next);
3341 * Here we check to see if we were have already done a FR
3342 * and if so we see if the biggest TSN we saw in the sack is
3343 * smaller than the recovery point. If so we don't strike
3344 * the tsn... otherwise we CAN strike the TSN.
3347 * @@@ JRI: Check for CMT if (accum_moved &&
3348 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3351 if (accum_moved && asoc->fast_retran_loss_recovery) {
3353 * Strike the TSN if in fast-recovery and cum-ack
3356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3357 sctp_log_fr(biggest_tsn_newly_acked,
3358 tp1->rec.data.TSN_seq,
3360 SCTP_FR_LOG_STRIKE_CHUNK);
3362 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3365 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3367 * CMT DAC algorithm: If SACK flag is set to
3368 * 0, then lowest_newack test will not pass
3369 * because it would have been set to the
3370 * cumack earlier. If not already to be
3371 * rtx'd, If not a mixed sack and if tp1 is
3372 * not between two sacked TSNs, then mark by
3373 * one more. NOTE that we are marking by one
3374 * additional time since the SACK DAC flag
3375 * indicates that two packets have been
3376 * received after this missing TSN.
3378 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3379 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3380 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3381 sctp_log_fr(16 + num_dests_sacked,
3382 tp1->rec.data.TSN_seq,
3384 SCTP_FR_LOG_STRIKE_CHUNK);
3389 } else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3391 * For those that have done a FR we must take
3392 * special consideration if we strike. I.e the
3393 * biggest_newly_acked must be higher than the
3394 * sending_seq at the time we did the FR.
3397 #ifdef SCTP_FR_TO_ALTERNATE
3399 * If FR's go to new networks, then we must only do
3400 * this for singly homed asoc's. However if the FR's
3401 * go to the same network (Armando's work) then its
3402 * ok to FR multiple times.
3410 if ((compare_with_wrap(biggest_tsn_newly_acked,
3411 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3412 (biggest_tsn_newly_acked ==
3413 tp1->rec.data.fast_retran_tsn)) {
3415 * Strike the TSN, since this ack is
3416 * beyond where things were when we
3419 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3420 sctp_log_fr(biggest_tsn_newly_acked,
3421 tp1->rec.data.TSN_seq,
3423 SCTP_FR_LOG_STRIKE_CHUNK);
3425 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3429 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3431 * CMT DAC algorithm: If
3432 * SACK flag is set to 0,
3433 * then lowest_newack test
3434 * will not pass because it
3435 * would have been set to
3436 * the cumack earlier. If
3437 * not already to be rtx'd,
3438 * If not a mixed sack and
3439 * if tp1 is not between two
3440 * sacked TSNs, then mark by
3441 * one more. NOTE that we
3442 * are marking by one
3443 * additional time since the
3444 * SACK DAC flag indicates
3445 * that two packets have
3446 * been received after this
3449 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3450 (num_dests_sacked == 1) &&
3451 compare_with_wrap(this_sack_lowest_newack,
3452 tp1->rec.data.TSN_seq, MAX_TSN)) {
3453 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3454 sctp_log_fr(32 + num_dests_sacked,
3455 tp1->rec.data.TSN_seq,
3457 SCTP_FR_LOG_STRIKE_CHUNK);
3459 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3467 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3470 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3471 biggest_tsn_newly_acked, MAX_TSN)) {
3473 * We don't strike these: This is the HTNA
3474 * algorithm i.e. we don't strike If our TSN is
3475 * larger than the Highest TSN Newly Acked.
3479 /* Strike the TSN */
3480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3481 sctp_log_fr(biggest_tsn_newly_acked,
3482 tp1->rec.data.TSN_seq,
3484 SCTP_FR_LOG_STRIKE_CHUNK);
3486 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3489 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3491 * CMT DAC algorithm: If SACK flag is set to
3492 * 0, then lowest_newack test will not pass
3493 * because it would have been set to the
3494 * cumack earlier. If not already to be
3495 * rtx'd, If not a mixed sack and if tp1 is
3496 * not between two sacked TSNs, then mark by
3497 * one more. NOTE that we are marking by one
3498 * additional time since the SACK DAC flag
3499 * indicates that two packets have been
3500 * received after this missing TSN.
3502 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3503 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3505 sctp_log_fr(48 + num_dests_sacked,
3506 tp1->rec.data.TSN_seq,
3508 SCTP_FR_LOG_STRIKE_CHUNK);
3514 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3515 struct sctp_nets *alt;
3517 /* fix counts and things */
3518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3519 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3520 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3522 (uintptr_t) tp1->whoTo,
3523 tp1->rec.data.TSN_seq);
3526 tp1->whoTo->net_ack++;
3527 sctp_flight_size_decrease(tp1);
3529 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3530 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3531 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3533 /* add back to the rwnd */
3534 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3536 /* remove from the total flight */
3537 sctp_total_flight_decrease(stcb, tp1);
3539 if ((stcb->asoc.peer_supports_prsctp) &&
3540 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3542 * Has it been retransmitted tv_sec times? -
3543 * we store the retran count there.
3545 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3546 /* Yes, so drop it */
3547 if (tp1->data != NULL) {
3548 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3549 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3550 SCTP_SO_NOT_LOCKED);
3552 /* Make sure to flag we had a FR */
3553 tp1->whoTo->net_ack++;
3554 tp1 = TAILQ_NEXT(tp1, sctp_next);
3558 /* printf("OK, we are now ready to FR this guy\n"); */
3559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3560 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3564 /* This is a subsequent FR */
3565 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3567 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3568 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3570 * CMT: Using RTX_SSTHRESH policy for CMT.
3571 * If CMT is being used, then pick dest with
3572 * largest ssthresh for any retransmission.
3574 tp1->no_fr_allowed = 1;
3576 /* sa_ignore NO_NULL_CHK */
3577 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3579 * JRS 5/18/07 - If CMT PF is on,
3580 * use the PF version of
3583 alt = sctp_find_alternate_net(stcb, alt, 2);
3586 * JRS 5/18/07 - If only CMT is on,
3587 * use the CMT version of
3590 /* sa_ignore NO_NULL_CHK */
3591 alt = sctp_find_alternate_net(stcb, alt, 1);
3597 * CUCv2: If a different dest is picked for
3598 * the retransmission, then new
3599 * (rtx-)pseudo_cumack needs to be tracked
3600 * for orig dest. Let CUCv2 track new (rtx-)
3601 * pseudo-cumack always.
3604 tp1->whoTo->find_pseudo_cumack = 1;
3605 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3607 } else {/* CMT is OFF */
3609 #ifdef SCTP_FR_TO_ALTERNATE
3610 /* Can we find an alternate? */
3611 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3614 * default behavior is to NOT retransmit
3615 * FR's to an alternate. Armando Caro's
3616 * paper details why.
3622 tp1->rec.data.doing_fast_retransmit = 1;
3624 /* mark the sending seq for possible subsequent FR's */
3626 * printf("Marking TSN for FR new value %x\n",
3627 * (uint32_t)tpi->rec.data.TSN_seq);
3629 if (TAILQ_EMPTY(&asoc->send_queue)) {
3631 * If the queue of send is empty then its
3632 * the next sequence number that will be
3633 * assigned so we subtract one from this to
3634 * get the one we last sent.
3636 tp1->rec.data.fast_retran_tsn = sending_seq;
3639 * If there are chunks on the send queue
3640 * (unsent data that has made it from the
3641 * stream queues but not out the door, we
3642 * take the first one (which will have the
3643 * lowest TSN) and subtract one to get the
3646 struct sctp_tmit_chunk *ttt;
3648 ttt = TAILQ_FIRST(&asoc->send_queue);
3649 tp1->rec.data.fast_retran_tsn =
3650 ttt->rec.data.TSN_seq;
3655 * this guy had a RTO calculation pending on
3660 if (alt != tp1->whoTo) {
3661 /* yes, there is an alternate. */
3662 sctp_free_remote_addr(tp1->whoTo);
3663 /* sa_ignore FREED_MEMORY */
3665 atomic_add_int(&alt->ref_count, 1);
3668 tp1 = TAILQ_NEXT(tp1, sctp_next);
3671 if (tot_retrans > 0) {
3673 * Setup the ecn nonce re-sync point. We do this since once
3674 * we go to FR something we introduce a Karn's rule scenario
3675 * and won't know the totals for the ECN bits.
3677 asoc->nonce_resync_tsn = sending_seq;
3678 asoc->nonce_wait_for_ecne = 0;
3679 asoc->nonce_sum_check = 0;
/*
 * sctp_try_advance_peer_ack_point() - PR-SCTP "C1" procedure: walk the
 * sent queue from the front and try to move asoc->advanced_peer_ack_point
 * forward over chunks marked SCTP_FORWARD_TSN_SKIP (abandoned PR-SCTP
 * data).  Returns the chunk whose TSN the ack point advanced to (a_adv),
 * or NULL when no advance is possible.  Caller holds the TCB lock.
 *
 * NOTE(review): this listing is a sampled extract -- interior lines
 * (returns, closing braces, parts of conditions) are missing, so the
 * control flow shown here is incomplete.  Comments below describe only
 * what the visible lines establish.
 */
3683 struct sctp_tmit_chunk *
3684 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3685 struct sctp_association *asoc)
3687 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* PR-SCTP not negotiated with the peer: nothing to advance. */
3691 if (asoc->peer_supports_prsctp == 0) {
3694 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only SKIP-marked or resend-marked chunks can be stepped over. */
3696 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3697 tp1->sent != SCTP_DATAGRAM_RESEND) {
3698 /* no chance to advance, out of here */
3701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3702 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3703 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3704 asoc->advanced_peer_ack_point,
3705 tp1->rec.data.TSN_seq, 0, 0);
3708 if (!PR_SCTP_ENABLED(tp1->flags)) {
3710 * We can't fwd-tsn past any that are reliable aka
3711 * retransmitted until the asoc fails.
/* Snapshot current time once for the TTL comparisons below. */
3716 (void)SCTP_GETTIME_TIMEVAL(&now);
3719 tp2 = TAILQ_NEXT(tp1, sctp_next);
3721 * now we got a chunk which is marked for another
3722 * retransmission to a PR-stream but has run out its chances
3723 * already maybe OR has been marked to skip now. Can we skip
3724 * it if its a resend?
3726 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3727 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3729 * Now is this one marked for resend and its time is
3732 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3733 /* Yes so drop it */
3735 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3736 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3737 SCTP_SO_NOT_LOCKED);
3741 * No, we are done when hit one for resend
3742 * whos time as not expired.
3748 * Ok now if this chunk is marked to drop it we can clean up
3749 * the chunk, advance our peer ack point and we can check
/* SKIP-marked chunk: ack point moves to its TSN (with wraparound). */
3752 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3753 /* advance PeerAckPoint goes forward */
3754 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3755 asoc->advanced_peer_ack_point,
3758 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3760 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3761 /* No update but we do save the chk */
3766 * If it is still in RESEND we can advance no
3772 * If we hit here we just dumped tp1, move to next tsn on
/*
 * sctp_fs_audit() - sanity audit of flight-size accounting.  Walks the
 * sent queue and tallies chunks by their 'sent' state (below RESEND =
 * in flight, == RESEND, between RESEND and ACKED, above ACKED), then
 * flags a mismatch against the association's recorded total_flight /
 * total_flight_count (captured in entry_flight/entry_cnt on entry).
 * NOTE(review): sampled extract -- the return statements and some
 * counter increments are on lines missing from this listing; the
 * function presumably returns nonzero on inconsistency (the caller at
 * "if (sctp_fs_audit(asoc))" treats nonzero as "rebuild flight") --
 * TODO confirm against the full source.
 */
3781 sctp_fs_audit(struct sctp_association *asoc)
3783 struct sctp_tmit_chunk *chk;
3784 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3785 int entry_flight, entry_cnt, ret;
/* Snapshot the association's view before recounting from the queue. */
3787 entry_flight = asoc->total_flight;
3788 entry_cnt = asoc->total_flight_count;
/* All-PR-SCTP queues are expected to diverge; skip the audit. */
3791 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3794 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3795 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3796 printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3797 chk->rec.data.TSN_seq,
3802 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3804 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3806 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in-flight/in-between here is a bug. */
3813 if ((inflight > 0) || (inbetween > 0)) {
3815 panic("Flight size-express incorrect? \n");
3817 printf("asoc->total_flight:%d cnt:%d\n",
3818 entry_flight, entry_cnt);
3820 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3821 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery() - undo a window-probe send: clear the
 * chunk's window_probe flag and, unless the chunk was already acked (or
 * its data freed), pull it out of flight-size accounting and re-mark it
 * SCTP_DATAGRAM_RESEND so it goes back onto the retransmit path.
 * Called when a SACK reopens the peer's window after a zero-window
 * probe.  Caller holds the TCB lock.
 * NOTE(review): 'net' is unused in the visible lines -- likely kept for
 * interface symmetry with callers; confirm against the full source.
 */
3830 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3831 struct sctp_association *asoc,
3832 struct sctp_nets *net,
3833 struct sctp_tmit_chunk *tp1)
3835 tp1->window_probe = 0;
/* Already past ACKED (or data gone): log and leave accounting alone. */
3836 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3837 /* TSN's skipped we do NOT move back. */
3838 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3839 tp1->whoTo->flight_size,
3841 (uintptr_t) tp1->whoTo,
3842 tp1->rec.data.TSN_seq);
3845 /* First setup this by shrinking flight */
3846 sctp_flight_size_decrease(tp1);
3847 sctp_total_flight_decrease(stcb, tp1);
3848 /* Now mark for resend */
3849 tp1->sent = SCTP_DATAGRAM_RESEND;
3850 asoc->sent_queue_retran_cnt++;
3851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3852 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3853 tp1->whoTo->flight_size,
3855 (uintptr_t) tp1->whoTo,
3856 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack() - fast-path handler for a SACK that
 * carries only a new cumulative ack (no gap-ack blocks).  Frees every
 * sent-queue chunk at or below 'cumack', updates flight size, RTO,
 * peer rwnd (with SWS avoidance), ECN-nonce bookkeeping, window-probe
 * recovery and retransmission timers, wakes the sending socket, drives
 * shutdown state transitions when the queues drain, and runs the
 * PR-SCTP advanced-peer-ack-point / FORWARD-TSN procedures.
 * Caller must hold the TCB lock (asserted below).
 *
 * NOTE(review): sampled extract -- many interior lines (else branches,
 * closing braces, some declarations such as 'old_rwnd', 'send_s',
 * 'oper', 'now', the retransmit-timer dispatch, and the done_once loop
 * back-edge) are missing from this listing.  Comments added here are
 * limited to what the visible lines establish.
 */
3861 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3862 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3864 struct sctp_nets *net;
3865 struct sctp_association *asoc;
3866 struct sctp_tmit_chunk *tp1, *tp2;
3868 int win_probe_recovery = 0;
3869 int win_probe_recovered = 0;
3870 int j, done_once = 0;
3872 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3873 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3874 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3876 SCTP_TCB_LOCK_ASSERT(stcb);
3877 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Optional debug ring buffer of received cum-acks. */
3878 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3879 stcb->asoc.cumack_log_at++;
3880 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3881 stcb->asoc.cumack_log_at = 0;
3885 old_rwnd = asoc->peers_rwnd;
/* Old (behind) cum-ack: visible branch is empty here (early out). */
3886 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3889 } else if (asoc->last_acked_seq == cumack) {
3890 /* Window update sack */
3891 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3892 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3893 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3894 /* SWS sender side engages */
3895 asoc->peers_rwnd = 0;
3897 if (asoc->peers_rwnd > old_rwnd) {
3902 /* First setup for CC stuff */
3903 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3904 net->prev_cwnd = net->cwnd;
3909 * CMT: Reset CUC and Fast recovery algo variables before
3912 net->new_pseudo_cumack = 0;
3913 net->will_exit_fast_recovery = 0;
/* Strict-SACK check: abort if the peer acks a TSN we never sent. */
3915 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3918 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3919 tp1 = TAILQ_LAST(&asoc->sent_queue,
3920 sctpchunk_listhead);
3921 send_s = tp1->rec.data.TSN_seq + 1;
3923 send_s = asoc->sending_seq;
3925 if ((cumack == send_s) ||
3926 compare_with_wrap(cumack, send_s, MAX_TSN)) {
3932 panic("Impossible sack 1");
/* Build PROTOCOL_VIOLATION cause and abort the association. */
3937 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3938 0, M_DONTWAIT, 1, MT_DATA);
3940 struct sctp_paramhdr *ph;
3943 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3945 ph = mtod(oper, struct sctp_paramhdr *);
3946 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3947 ph->param_length = htons(SCTP_BUF_LEN(oper));
3948 ippp = (uint32_t *) (ph + 1);
3949 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3951 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3952 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3957 asoc->this_sack_highest_gap = cumack;
3958 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3959 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3960 stcb->asoc.overall_error_count,
3962 SCTP_FROM_SCTP_INDATA,
/* Any acceptable SACK clears the association error counter. */
3965 stcb->asoc.overall_error_count = 0;
3966 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3967 /* process the new consecutive TSN first */
3968 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3970 tp2 = TAILQ_NEXT(tp1, sctp_next);
3971 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3973 cumack == tp1->rec.data.TSN_seq) {
3974 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3975 printf("Warning, an unsent is now acked?\n");
3978 * ECN Nonce: Add the nonce to the sender's
3981 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3982 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3984 * If it is less than ACKED, it is
3985 * now no-longer in flight. Higher
3986 * values may occur during marking
3988 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3989 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3990 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3991 tp1->whoTo->flight_size,
3993 (uintptr_t) tp1->whoTo,
3994 tp1->rec.data.TSN_seq);
3996 sctp_flight_size_decrease(tp1);
3997 /* sa_ignore NO_NULL_CHK */
3998 sctp_total_flight_decrease(stcb, tp1);
4000 tp1->whoTo->net_ack += tp1->send_size;
4001 if (tp1->snd_count < 2) {
4003 * True non-retransmited
4006 tp1->whoTo->net_ack2 +=
4009 /* update RTO too? */
4016 sctp_calculate_rto(stcb,
4018 &tp1->sent_rcv_time,
4019 sctp_align_safe_nocopy);
4024 * CMT: CUCv2 algorithm. From the
4025 * cumack'd TSNs, for each TSN being
4026 * acked for the first time, set the
4027 * following variables for the
4028 * corresp destination.
4029 * new_pseudo_cumack will trigger a
4031 * find_(rtx_)pseudo_cumack will
4032 * trigger search for the next
4033 * expected (rtx-)pseudo-cumack.
4035 tp1->whoTo->new_pseudo_cumack = 1;
4036 tp1->whoTo->find_pseudo_cumack = 1;
4037 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4039 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4040 /* sa_ignore NO_NULL_CHK */
4041 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4044 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4045 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4047 if (tp1->rec.data.chunk_was_revoked) {
4048 /* deflate the cwnd */
4049 tp1->whoTo->cwnd -= tp1->book_size;
4050 tp1->rec.data.chunk_was_revoked = 0;
/* Chunk fully acked: unlink, free its mbufs, release the chunk. */
4052 tp1->sent = SCTP_DATAGRAM_ACKED;
4053 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4055 /* sa_ignore NO_NULL_CHK */
4056 sctp_free_bufspace(stcb, asoc, tp1, 1);
4057 sctp_m_freem(tp1->data);
4059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4060 sctp_log_sack(asoc->last_acked_seq,
4062 tp1->rec.data.TSN_seq,
4065 SCTP_LOG_FREE_SENT);
4068 asoc->sent_queue_cnt--;
4069 sctp_free_a_chunk(stcb, tp1);
4077 /* sa_ignore NO_NULL_CHK */
/* Wake the sending socket now that send space was freed. */
4078 if (stcb->sctp_socket) {
4079 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4083 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4084 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4085 /* sa_ignore NO_NULL_CHK */
4086 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4088 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4089 so = SCTP_INP_SO(stcb->sctp_ep);
/* Lock-order dance: drop TCB lock to take the socket lock, then
 * re-take the TCB lock; refcnt pin keeps the assoc alive meanwhile. */
4090 atomic_add_int(&stcb->asoc.refcnt, 1);
4091 SCTP_TCB_UNLOCK(stcb);
4092 SCTP_SOCKET_LOCK(so, 1);
4093 SCTP_TCB_LOCK(stcb);
4094 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4095 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4096 /* assoc was freed while we were unlocked */
4097 SCTP_SOCKET_UNLOCK(so, 1);
4101 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4102 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4103 SCTP_SOCKET_UNLOCK(so, 1);
4106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4107 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4111 /* JRS - Use the congestion control given in the CC module */
4112 if (asoc->last_acked_seq != cumack)
4113 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4115 asoc->last_acked_seq = cumack;
4117 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4118 /* nothing left in-flight */
4119 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4120 net->flight_size = 0;
4121 net->partial_bytes_acked = 0;
4123 asoc->total_flight = 0;
4124 asoc->total_flight_count = 0;
4126 /* ECN Nonce updates */
4127 if (asoc->ecn_nonce_allowed) {
4128 if (asoc->nonce_sum_check) {
/* Nonce-sum mismatch: arm a wait-TSN so we can tell whether the
 * discrepancy resolves itself or the peer is cheating on ECN. */
4129 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4130 if (asoc->nonce_wait_for_ecne == 0) {
4131 struct sctp_tmit_chunk *lchk;
4133 lchk = TAILQ_FIRST(&asoc->send_queue);
4134 asoc->nonce_wait_for_ecne = 1;
4136 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4138 asoc->nonce_wait_tsn = asoc->sending_seq;
4141 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4142 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4144 * Misbehaving peer. We need
4145 * to react to this guy
4147 asoc->ecn_allowed = 0;
4148 asoc->ecn_nonce_allowed = 0;
4153 /* See if Resynchronization Possible */
4154 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4155 asoc->nonce_sum_check = 1;
4157 * Now we must calculate what the base is.
4158 * We do this based on two things, we know
4159 * the total's for all the segments
4160 * gap-acked in the SACK (none). We also
4161 * know the SACK's nonce sum, its in
4162 * nonce_sum_flag. So we can build a truth
4163 * table to back-calculate the new value of
4164 * asoc->nonce_sum_expect_base:
4166 * SACK-flag-Value Seg-Sums Base 0 0 0
4170 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute peer rwnd from advertised rwnd minus outstanding data. */
4175 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4176 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4177 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4178 /* SWS sender side engages */
4179 asoc->peers_rwnd = 0;
4181 if (asoc->peers_rwnd > old_rwnd) {
4182 win_probe_recovery = 1;
4184 /* Now assure a timer where data is queued at */
4187 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4190 if (win_probe_recovery && (net->window_probe)) {
4191 win_probe_recovered = 1;
4193 * Find first chunk that was used with window probe
4194 * and clear the sent
4196 /* sa_ignore FREED_MEMORY */
4197 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4198 if (tp1->window_probe) {
4199 /* move back to data send queue */
4200 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4205 if (net->RTO == 0) {
4206 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4208 to_ticks = MSEC_TO_TICKS(net->RTO);
/* Data still in flight on this net: (re)start the T3-rxt timer. */
4210 if (net->flight_size) {
4212 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4213 sctp_timeout_handler, &net->rxt_timer);
4214 if (net->window_probe) {
4215 net->window_probe = 0;
4218 if (net->window_probe) {
4220 * In window probes we must assure a timer
4221 * is still running there
4223 net->window_probe = 0;
4224 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4225 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4226 sctp_timeout_handler, &net->rxt_timer);
4228 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4229 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4231 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4233 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4234 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4235 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4236 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4237 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Consistency repair: flight totals say zero but chunks remain --
 * audit and rebuild the per-chunk flight accounting. */
4243 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4244 (asoc->sent_queue_retran_cnt == 0) &&
4245 (win_probe_recovered == 0) &&
4248 * huh, this should not happen unless all packets are
4249 * PR-SCTP and marked to skip of course.
4251 if (sctp_fs_audit(asoc)) {
4252 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4253 net->flight_size = 0;
4255 asoc->total_flight = 0;
4256 asoc->total_flight_count = 0;
4257 asoc->sent_queue_retran_cnt = 0;
4258 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4259 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4260 sctp_flight_size_increase(tp1);
4261 sctp_total_flight_increase(stcb, tp1);
4262 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4263 asoc->sent_queue_retran_cnt++;
4270 /**********************************/
4271 /* Now what about shutdown issues */
4272 /**********************************/
4273 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4274 /* nothing left on sendqueue.. consider done */
4276 if ((asoc->stream_queue_cnt == 1) &&
4277 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4278 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4279 (asoc->locked_on_sending)
4281 struct sctp_stream_queue_pending *sp;
4284 * I may be in a state where we got all across.. but
4285 * cannot write more due to a shutdown... we abort
4286 * since the user did not indicate EOR in this case.
4287 * The sp will be cleaned during free of the asoc.
4289 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4291 if ((sp) && (sp->length == 0)) {
4292 /* Let cleanup code purge it */
4293 if (sp->msg_is_complete) {
4294 asoc->stream_queue_cnt--;
4296 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4297 asoc->locked_on_sending = NULL;
4298 asoc->stream_queue_cnt--;
4302 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4303 (asoc->stream_queue_cnt == 0)) {
4304 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4305 /* Need to abort here */
/* Build USER_INITIATED_ABT cause and abort (partial message left). */
4311 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4312 0, M_DONTWAIT, 1, MT_DATA);
4314 struct sctp_paramhdr *ph;
4317 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4319 ph = mtod(oper, struct sctp_paramhdr *);
4320 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4321 ph->param_length = htons(SCTP_BUF_LEN(oper));
4322 ippp = (uint32_t *) (ph + 1);
4323 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4325 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4326 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4328 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4329 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4330 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* All data acked and user closed: send SHUTDOWN, start its timers. */
4332 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4333 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4334 sctp_stop_timers_for_shutdown(stcb);
4335 sctp_send_shutdown(stcb,
4336 stcb->asoc.primary_destination);
4337 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4338 stcb->sctp_ep, stcb, asoc->primary_destination);
4339 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4340 stcb->sctp_ep, stcb, asoc->primary_destination);
4342 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4343 (asoc->stream_queue_cnt == 0)) {
4344 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4347 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4348 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4349 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4350 sctp_send_shutdown_ack(stcb,
4351 stcb->asoc.primary_destination);
4353 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4354 stcb->sctp_ep, stcb, asoc->primary_destination);
4357 /*********************************************/
4358 /* Here we perform PR-SCTP procedures */
4360 /*********************************************/
4361 /* C1. update advancedPeerAckPoint */
4362 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4363 asoc->advanced_peer_ack_point = cumack;
4365 /* PR-Sctp issues need to be addressed too */
4366 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4367 struct sctp_tmit_chunk *lchk;
4368 uint32_t old_adv_peer_ack_point;
4370 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4371 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4372 /* C3. See if we need to send a Fwd-TSN */
4373 if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4376 * ISSUE with ECN, see FWD-TSN processing for notes
4377 * on issues that will occur when the ECN NONCE
4378 * stuff is put into SCTP for cross checking.
4380 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4382 send_forward_tsn(stcb, asoc);
4384 * ECN Nonce: Disable Nonce Sum check when
4385 * FWD TSN is sent and store resync tsn
4387 asoc->nonce_sum_check = 0;
4388 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4390 /* try to FR fwd-tsn's that get lost too */
4391 lchk->rec.data.fwd_tsn_cnt++;
4392 if (lchk->rec.data.fwd_tsn_cnt > 3) {
4393 send_forward_tsn(stcb, asoc);
4394 lchk->rec.data.fwd_tsn_cnt = 0;
4399 /* Assure a timer is up */
4400 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4401 stcb->sctp_ep, stcb, lchk->whoTo);
4404 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4405 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4407 stcb->asoc.peers_rwnd,
4408 stcb->asoc.total_flight,
4409 stcb->asoc.total_output_queue_size);
4414 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4415 struct sctp_tcb *stcb, struct sctp_nets *net_from,
4416 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4417 int *abort_now, uint8_t flags,
4418 uint32_t cum_ack, uint32_t rwnd)
4420 struct sctp_association *asoc;
4421 struct sctp_tmit_chunk *tp1, *tp2;
4422 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4423 uint32_t sav_cum_ack;
4424 uint16_t wake_him = 0;
4425 uint32_t send_s = 0;
4427 int accum_moved = 0;
4428 int will_exit_fast_recovery = 0;
4429 uint32_t a_rwnd, old_rwnd;
4430 int win_probe_recovery = 0;
4431 int win_probe_recovered = 0;
4432 struct sctp_nets *net = NULL;
4433 int nonce_sum_flag, ecn_seg_sums = 0;
4435 uint8_t reneged_all = 0;
4436 uint8_t cmt_dac_flag;
4439 * we take any chance we can to service our queues since we cannot
4440 * get awoken when the socket is read from :<
4443 * Now perform the actual SACK handling: 1) Verify that it is not an
4444 * old sack, if so discard. 2) If there is nothing left in the send
4445 * queue (cum-ack is equal to last acked) then you have a duplicate
4446 * too, update any rwnd change and verify no timers are running.
4447 * then return. 3) Process any new consequtive data i.e. cum-ack
4448 * moved process these first and note that it moved. 4) Process any
4449 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4450 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4451 * sync up flightsizes and things, stop all timers and also check
4452 * for shutdown_pending state. If so then go ahead and send off the
4453 * shutdown. If in shutdown recv, send off the shutdown-ack and
4454 * start that timer, Ret. 9) Strike any non-acked things and do FR
4455 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4456 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4457 * if in shutdown_recv state.
4459 SCTP_TCB_LOCK_ASSERT(stcb);
4461 this_sack_lowest_newack = 0;
4463 SCTP_STAT_INCR(sctps_slowpath_sack);
4465 nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4466 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4467 #ifdef SCTP_ASOCLOG_OF_TSNS
4468 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4469 stcb->asoc.cumack_log_at++;
4470 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4471 stcb->asoc.cumack_log_at = 0;
4476 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4477 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4478 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4480 old_rwnd = stcb->asoc.peers_rwnd;
4481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4482 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4483 stcb->asoc.overall_error_count,
4485 SCTP_FROM_SCTP_INDATA,
4488 stcb->asoc.overall_error_count = 0;
4490 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4491 sctp_log_sack(asoc->last_acked_seq,
4498 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4500 uint32_t *dupdata, dblock;
4502 for (i = 0; i < num_dup; i++) {
4503 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4504 sizeof(uint32_t), (uint8_t *) & dblock);
4505 if (dupdata == NULL) {
4508 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4511 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4513 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4514 tp1 = TAILQ_LAST(&asoc->sent_queue,
4515 sctpchunk_listhead);
4516 send_s = tp1->rec.data.TSN_seq + 1;
4519 send_s = asoc->sending_seq;
4521 if (cum_ack == send_s ||
4522 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4526 * no way, we have not even sent this TSN out yet.
4527 * Peer is hopelessly messed up with us.
4529 printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4532 printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4533 tp1->rec.data.TSN_seq, tp1);
4538 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4539 0, M_DONTWAIT, 1, MT_DATA);
4541 struct sctp_paramhdr *ph;
4544 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4546 ph = mtod(oper, struct sctp_paramhdr *);
4547 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4548 ph->param_length = htons(SCTP_BUF_LEN(oper));
4549 ippp = (uint32_t *) (ph + 1);
4550 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4552 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4553 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4557 /**********************/
4558 /* 1) check the range */
4559 /**********************/
4560 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4561 /* acking something behind */
4564 sav_cum_ack = asoc->last_acked_seq;
4566 /* update the Rwnd of the peer */
4567 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4568 TAILQ_EMPTY(&asoc->send_queue) &&
4569 (asoc->stream_queue_cnt == 0)) {
4570 /* nothing left on send/sent and strmq */
4571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4572 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4573 asoc->peers_rwnd, 0, 0, a_rwnd);
4575 asoc->peers_rwnd = a_rwnd;
4576 if (asoc->sent_queue_retran_cnt) {
4577 asoc->sent_queue_retran_cnt = 0;
4579 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4580 /* SWS sender side engages */
4581 asoc->peers_rwnd = 0;
4583 /* stop any timers */
4584 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4585 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4586 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4587 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4588 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4589 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4590 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4591 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4594 net->partial_bytes_acked = 0;
4595 net->flight_size = 0;
4597 asoc->total_flight = 0;
4598 asoc->total_flight_count = 0;
4602 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4603 * things. The total byte count acked is tracked in netAckSz AND
4604 * netAck2 is used to track the total bytes acked that are un-
4605 * amibguious and were never retransmitted. We track these on a per
4606 * destination address basis.
4608 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4609 net->prev_cwnd = net->cwnd;
4614 * CMT: Reset CUC and Fast recovery algo variables before
4617 net->new_pseudo_cumack = 0;
4618 net->will_exit_fast_recovery = 0;
4620 /* process the new consecutive TSN first */
4621 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4623 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4625 last_tsn == tp1->rec.data.TSN_seq) {
4626 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4628 * ECN Nonce: Add the nonce to the sender's
4631 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4633 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4635 * If it is less than ACKED, it is
4636 * now no-longer in flight. Higher
4637 * values may occur during marking
4639 if ((tp1->whoTo->dest_state &
4640 SCTP_ADDR_UNCONFIRMED) &&
4641 (tp1->snd_count < 2)) {
4643 * If there was no retran
4644 * and the address is
4645 * un-confirmed and we sent
4647 * sacked.. its confirmed,
4650 tp1->whoTo->dest_state &=
4651 ~SCTP_ADDR_UNCONFIRMED;
4653 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4654 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4655 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4656 tp1->whoTo->flight_size,
4658 (uintptr_t) tp1->whoTo,
4659 tp1->rec.data.TSN_seq);
4661 sctp_flight_size_decrease(tp1);
4662 sctp_total_flight_decrease(stcb, tp1);
4664 tp1->whoTo->net_ack += tp1->send_size;
4666 /* CMT SFR and DAC algos */
4667 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4668 tp1->whoTo->saw_newack = 1;
4670 if (tp1->snd_count < 2) {
4672 * True non-retransmited
4675 tp1->whoTo->net_ack2 +=
4678 /* update RTO too? */
4681 sctp_calculate_rto(stcb,
4683 &tp1->sent_rcv_time,
4684 sctp_align_safe_nocopy);
4689 * CMT: CUCv2 algorithm. From the
4690 * cumack'd TSNs, for each TSN being
4691 * acked for the first time, set the
4692 * following variables for the
4693 * corresp destination.
4694 * new_pseudo_cumack will trigger a
4696 * find_(rtx_)pseudo_cumack will
4697 * trigger search for the next
4698 * expected (rtx-)pseudo-cumack.
4700 tp1->whoTo->new_pseudo_cumack = 1;
4701 tp1->whoTo->find_pseudo_cumack = 1;
4702 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4705 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4706 sctp_log_sack(asoc->last_acked_seq,
4708 tp1->rec.data.TSN_seq,
4711 SCTP_LOG_TSN_ACKED);
4713 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4714 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4717 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4718 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4719 #ifdef SCTP_AUDITING_ENABLED
4720 sctp_audit_log(0xB3,
4721 (asoc->sent_queue_retran_cnt & 0x000000ff));
4724 if (tp1->rec.data.chunk_was_revoked) {
4725 /* deflate the cwnd */
4726 tp1->whoTo->cwnd -= tp1->book_size;
4727 tp1->rec.data.chunk_was_revoked = 0;
4729 tp1->sent = SCTP_DATAGRAM_ACKED;
4734 tp1 = TAILQ_NEXT(tp1, sctp_next);
4736 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4737 /* always set this up to cum-ack */
4738 asoc->this_sack_highest_gap = last_tsn;
4740 if ((num_seg > 0) || (num_nr_seg > 0)) {
4743 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4744 * to be greater than the cumack. Also reset saw_newack to 0
4747 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4748 net->saw_newack = 0;
4749 net->this_sack_highest_newack = last_tsn;
4753 * thisSackHighestGap will increase while handling NEW
4754 * segments this_sack_highest_newack will increase while
4755 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4756 * used for CMT DAC algo. saw_newack will also change.
4758 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4759 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4760 num_seg, num_nr_seg, &ecn_seg_sums)) {
4763 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4765 * validate the biggest_tsn_acked in the gap acks if
4766 * strict adherence is wanted.
4768 if ((biggest_tsn_acked == send_s) ||
4769 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4771 * peer is either confused or we are under
4772 * attack. We must abort.
4774 printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4782 /*******************************************/
4783 /* cancel ALL T3-send timer if accum moved */
4784 /*******************************************/
4785 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
4786 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4787 if (net->new_pseudo_cumack)
4788 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4790 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4795 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4797 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4801 /********************************************/
4802 /* drop the acked chunks from the sendqueue */
4803 /********************************************/
4804 asoc->last_acked_seq = cum_ack;
4806 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4810 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4814 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4815 /* no more sent on list */
4816 printf("Warning, tp1->sent == %d and its now acked?\n",
4819 tp2 = TAILQ_NEXT(tp1, sctp_next);
4820 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4821 if (tp1->pr_sctp_on) {
4822 if (asoc->pr_sctp_cnt != 0)
4823 asoc->pr_sctp_cnt--;
4825 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4826 (asoc->total_flight > 0)) {
4828 panic("Warning flight size is postive and should be 0");
4830 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4831 asoc->total_flight);
4833 asoc->total_flight = 0;
4836 /* sa_ignore NO_NULL_CHK */
4837 sctp_free_bufspace(stcb, asoc, tp1, 1);
4838 sctp_m_freem(tp1->data);
4839 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4840 asoc->sent_queue_cnt_removeable--;
4843 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4844 sctp_log_sack(asoc->last_acked_seq,
4846 tp1->rec.data.TSN_seq,
4849 SCTP_LOG_FREE_SENT);
4852 asoc->sent_queue_cnt--;
4853 sctp_free_a_chunk(stcb, tp1);
4856 } while (tp1 != NULL);
4859 /* sa_ignore NO_NULL_CHK */
4860 if ((wake_him) && (stcb->sctp_socket)) {
4861 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4865 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4866 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4867 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4869 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4870 so = SCTP_INP_SO(stcb->sctp_ep);
4871 atomic_add_int(&stcb->asoc.refcnt, 1);
4872 SCTP_TCB_UNLOCK(stcb);
4873 SCTP_SOCKET_LOCK(so, 1);
4874 SCTP_TCB_LOCK(stcb);
4875 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4876 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4877 /* assoc was freed while we were unlocked */
4878 SCTP_SOCKET_UNLOCK(so, 1);
4882 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4883 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4884 SCTP_SOCKET_UNLOCK(so, 1);
4887 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4888 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4892 if (asoc->fast_retran_loss_recovery && accum_moved) {
4893 if (compare_with_wrap(asoc->last_acked_seq,
4894 asoc->fast_recovery_tsn, MAX_TSN) ||
4895 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4896 /* Setup so we will exit RFC2582 fast recovery */
4897 will_exit_fast_recovery = 1;
4901 * Check for revoked fragments:
4903 * if Previous sack - Had no frags then we can't have any revoked if
4904 * Previous sack - Had frag's then - If we now have frags aka
4905 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4906 * some of them. else - The peer revoked all ACKED fragments, since
4907 * we had some before and now we have NONE.
4911 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4912 else if (asoc->saw_sack_with_frags) {
4913 int cnt_revoked = 0;
4915 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4917 /* Peer revoked all dg's marked or acked */
4918 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4920 * EY- maybe check only if it is nr_acked
4921 * nr_marked may not be possible
4923 if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
4924 (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
4927 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4928 tp1->sent = SCTP_DATAGRAM_SENT;
4929 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4930 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4931 tp1->whoTo->flight_size,
4933 (uintptr_t) tp1->whoTo,
4934 tp1->rec.data.TSN_seq);
4936 sctp_flight_size_increase(tp1);
4937 sctp_total_flight_increase(stcb, tp1);
4938 tp1->rec.data.chunk_was_revoked = 1;
4940 * To ensure that this increase in
4941 * flightsize, which is artificial,
4942 * does not throttle the sender, we
4943 * also increase the cwnd
4946 tp1->whoTo->cwnd += tp1->book_size;
4954 asoc->saw_sack_with_frags = 0;
4956 if (num_seg || num_nr_seg)
4957 asoc->saw_sack_with_frags = 1;
4959 asoc->saw_sack_with_frags = 0;
4961 /* JRS - Use the congestion control given in the CC module */
4962 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4964 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4965 /* nothing left in-flight */
4966 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4967 /* stop all timers */
4968 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4969 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4970 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4971 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4972 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4975 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4976 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4977 net->flight_size = 0;
4978 net->partial_bytes_acked = 0;
4980 asoc->total_flight = 0;
4981 asoc->total_flight_count = 0;
4983 /**********************************/
4984 /* Now what about shutdown issues */
4985 /**********************************/
4986 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4987 /* nothing left on sendqueue.. consider done */
4988 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4989 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4990 asoc->peers_rwnd, 0, 0, a_rwnd);
4992 asoc->peers_rwnd = a_rwnd;
4993 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4994 /* SWS sender side engages */
4995 asoc->peers_rwnd = 0;
4998 if ((asoc->stream_queue_cnt == 1) &&
4999 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5000 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5001 (asoc->locked_on_sending)
5003 struct sctp_stream_queue_pending *sp;
5006 * I may be in a state where we got all across.. but
5007 * cannot write more due to a shutdown... we abort
5008 * since the user did not indicate EOR in this case.
5010 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5012 if ((sp) && (sp->length == 0)) {
5013 asoc->locked_on_sending = NULL;
5014 if (sp->msg_is_complete) {
5015 asoc->stream_queue_cnt--;
5017 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5018 asoc->stream_queue_cnt--;
5022 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5023 (asoc->stream_queue_cnt == 0)) {
5024 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5025 /* Need to abort here */
5031 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5032 0, M_DONTWAIT, 1, MT_DATA);
5034 struct sctp_paramhdr *ph;
5037 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5039 ph = mtod(oper, struct sctp_paramhdr *);
5040 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5041 ph->param_length = htons(SCTP_BUF_LEN(oper));
5042 ippp = (uint32_t *) (ph + 1);
5043 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5045 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5046 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5049 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5050 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5051 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5053 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5054 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5055 sctp_stop_timers_for_shutdown(stcb);
5056 sctp_send_shutdown(stcb,
5057 stcb->asoc.primary_destination);
5058 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5059 stcb->sctp_ep, stcb, asoc->primary_destination);
5060 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5061 stcb->sctp_ep, stcb, asoc->primary_destination);
5064 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5065 (asoc->stream_queue_cnt == 0)) {
5066 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5069 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5070 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5071 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5072 sctp_send_shutdown_ack(stcb,
5073 stcb->asoc.primary_destination);
5075 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5076 stcb->sctp_ep, stcb, asoc->primary_destination);
5081 * Now here we are going to recycle net_ack for a different use...
5084 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5089 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5090 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5091 * automatically ensure that.
5093 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5094 this_sack_lowest_newack = cum_ack;
5096 if ((num_seg > 0) || (num_nr_seg > 0)) {
5097 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5098 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5100 /* JRS - Use the congestion control given in the CC module */
5101 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5103 /******************************************************************
5104 * Here we do the stuff with ECN Nonce checking.
5105 * We basically check to see if the nonce sum flag was incorrect
5106 * or if resynchronization needs to be done. Also if we catch a
5107 * misbehaving receiver we give him the kick.
5108 ******************************************************************/
5110 if (asoc->ecn_nonce_allowed) {
5111 if (asoc->nonce_sum_check) {
5112 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5113 if (asoc->nonce_wait_for_ecne == 0) {
5114 struct sctp_tmit_chunk *lchk;
5116 lchk = TAILQ_FIRST(&asoc->send_queue);
5117 asoc->nonce_wait_for_ecne = 1;
5119 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5121 asoc->nonce_wait_tsn = asoc->sending_seq;
5124 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5125 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5127 * Misbehaving peer. We need
5128 * to react to this guy
5130 asoc->ecn_allowed = 0;
5131 asoc->ecn_nonce_allowed = 0;
5136 /* See if Resynchronization Possible */
5137 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5138 asoc->nonce_sum_check = 1;
5140 * now we must calculate what the base is.
5141 * We do this based on two things, we know
5142 * the total's for all the segments
5143 * gap-acked in the SACK, its stored in
5144 * ecn_seg_sums. We also know the SACK's
5145 * nonce sum, its in nonce_sum_flag. So we
5146 * can build a truth table to back-calculate
5148 * asoc->nonce_sum_expect_base:
5150 * SACK-flag-Value Seg-Sums Base 0 0 0
5154 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5158 /* Now are we exiting loss recovery ? */
5159 if (will_exit_fast_recovery) {
5160 /* Ok, we must exit fast recovery */
5161 asoc->fast_retran_loss_recovery = 0;
5163 if ((asoc->sat_t3_loss_recovery) &&
5164 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5166 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5167 /* end satellite t3 loss recovery */
5168 asoc->sat_t3_loss_recovery = 0;
5173 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5174 if (net->will_exit_fast_recovery) {
5175 /* Ok, we must exit fast recovery */
5176 net->fast_retran_loss_recovery = 0;
5180 /* Adjust and set the new rwnd value */
5181 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5182 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5183 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5185 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5186 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5187 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5188 /* SWS sender side engages */
5189 asoc->peers_rwnd = 0;
5191 if (asoc->peers_rwnd > old_rwnd) {
5192 win_probe_recovery = 1;
5195 * Now we must setup so we have a timer up for anyone with
5201 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5202 if (win_probe_recovery && (net->window_probe)) {
5203 win_probe_recovered = 1;
5205 * Find first chunk that was used with
5206 * window probe and clear the event. Put
5207 * it back into the send queue as if has
5210 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5211 if (tp1->window_probe) {
5212 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5217 if (net->flight_size) {
5219 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5220 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5221 stcb->sctp_ep, stcb, net);
5223 if (net->window_probe) {
5224 net->window_probe = 0;
5227 if (net->window_probe) {
5229 * In window probes we must assure a timer
5230 * is still running there
5232 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5233 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5234 stcb->sctp_ep, stcb, net);
5237 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5238 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5240 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5242 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5243 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5244 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5245 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5246 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5252 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5253 (asoc->sent_queue_retran_cnt == 0) &&
5254 (win_probe_recovered == 0) &&
5257 * huh, this should not happen unless all packets are
5258 * PR-SCTP and marked to skip of course.
5260 if (sctp_fs_audit(asoc)) {
5261 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5262 net->flight_size = 0;
5264 asoc->total_flight = 0;
5265 asoc->total_flight_count = 0;
5266 asoc->sent_queue_retran_cnt = 0;
5267 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5268 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5269 sctp_flight_size_increase(tp1);
5270 sctp_total_flight_increase(stcb, tp1);
5271 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5272 asoc->sent_queue_retran_cnt++;
5279 /*********************************************/
5280 /* Here we perform PR-SCTP procedures */
5282 /*********************************************/
5283 /* C1. update advancedPeerAckPoint */
5284 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5285 asoc->advanced_peer_ack_point = cum_ack;
5287 /* C2. try to further move advancedPeerAckPoint ahead */
5288 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5289 struct sctp_tmit_chunk *lchk;
5290 uint32_t old_adv_peer_ack_point;
5292 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5293 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5294 /* C3. See if we need to send a Fwd-TSN */
5295 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5298 * ISSUE with ECN, see FWD-TSN processing for notes
5299 * on issues that will occur when the ECN NONCE
5300 * stuff is put into SCTP for cross checking.
5302 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5303 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5304 0xee, cum_ack, asoc->advanced_peer_ack_point,
5305 old_adv_peer_ack_point);
5307 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5309 send_forward_tsn(stcb, asoc);
5311 * ECN Nonce: Disable Nonce Sum check when
5312 * FWD TSN is sent and store resync tsn
5314 asoc->nonce_sum_check = 0;
5315 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5317 /* try to FR fwd-tsn's that get lost too */
5318 lchk->rec.data.fwd_tsn_cnt++;
5319 if (lchk->rec.data.fwd_tsn_cnt > 3) {
5320 send_forward_tsn(stcb, asoc);
5321 lchk->rec.data.fwd_tsn_cnt = 0;
5326 /* Assure a timer is up */
5327 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5328 stcb->sctp_ep, stcb, lchk->whoTo);
5331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5332 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5334 stcb->asoc.peers_rwnd,
5335 stcb->asoc.total_flight,
5336 stcb->asoc.total_output_queue_size);
/*
 * sctp_update_acked() — treat the cumulative-TSN carried in a received
 * SHUTDOWN chunk as an implicit SACK.  The chunk has no a_rwnd field, so
 * one is synthesized as peers_rwnd + total_flight: after the express SACK
 * path subtracts the newly acked in-flight bytes again, the peer's
 * advertised window ends up unchanged (see the in-line comment below).
 *
 * NOTE(review): this listing is a sampled extract — the return-type line,
 * braces and possibly other statements fall in elided gaps, and `netp`
 * is unused in the visible lines (presumably consumed by elided code or
 * simply unused here — confirm against the full file).  Code is left
 * byte-identical; comments only.
 */
5341 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5342 struct sctp_nets *netp, int *abort_flag)
5345 uint32_t cum_ack, a_rwnd;
5347 cum_ack = ntohl(cp->cumulative_tsn_ack);
5348 /* Arrange so a_rwnd does NOT change */
5349 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5351 /* Now call the express sack handling */
5352 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
/*
 * sctp_kick_prsctp_reorder_queue() — after a FWD-TSN has advanced a
 * stream's delivery point, flush that stream's in-order reorder queue.
 *
 * Two passes over strmin->inqueue are visible:
 *   1) deliver every queued entry whose SSN is at or before the new
 *      last_sequence_delivered (compare_with_wrap handles SSN wrap);
 *   2) then resume normal in-order delivery, handing up consecutive
 *      SSNs (tt = last_sequence_delivered + 1) for as long as the next
 *      expected SSN is at the head of the queue.
 * Each delivered entry is removed from the queue, the per-association
 * pending counters (size_on_all_streams / cnt_on_all_streams) are
 * decremented, and — if a socket still exists — the data is marked
 * non-revokable and pushed to the read queue with the read lock held.
 *
 * NOTE(review): sampled extract — loop headers, braces, `asoc`
 * initialization and the `tt` declaration fall in elided gaps; code is
 * left byte-identical, comments only.
 */
5356 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5357 struct sctp_stream_in *strmin)
5359 struct sctp_queued_to_read *ctl, *nctl;
5360 struct sctp_association *asoc;
/* tt = the highest stream sequence number now considered delivered */
5364 tt = strmin->last_sequence_delivered;
5366 * First deliver anything prior to and including the stream no that
5369 ctl = TAILQ_FIRST(&strmin->inqueue);
5371 nctl = TAILQ_NEXT(ctl, next);
5372 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5373 (tt == ctl->sinfo_ssn)) {
5374 /* this is deliverable now */
5375 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5376 /* subtract pending on streams */
5377 asoc->size_on_all_streams -= ctl->length;
5378 sctp_ucount_decr(asoc->cnt_on_all_streams);
5379 /* deliver it to at least the delivery-q */
5380 if (stcb->sctp_socket) {
5381 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5382 sctp_add_to_readq(stcb->sctp_ep, stcb,
5384 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5387 /* no more delivery now. */
5393 * now we must deliver things in queue the normal way if any are
/* second pass: hand up strictly consecutive SSNs from the queue head */
5396 tt = strmin->last_sequence_delivered + 1;
5397 ctl = TAILQ_FIRST(&strmin->inqueue);
5399 nctl = TAILQ_NEXT(ctl, next);
5400 if (tt == ctl->sinfo_ssn) {
5401 /* this is deliverable now */
5402 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5403 /* subtract pending on streams */
5404 asoc->size_on_all_streams -= ctl->length;
5405 sctp_ucount_decr(asoc->cnt_on_all_streams);
5406 /* deliver it to at least the delivery-q */
5407 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5408 if (stcb->sctp_socket) {
5409 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5410 sctp_add_to_readq(stcb->sctp_ep, stcb,
5412 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* advance to the next expected SSN and keep draining */
5415 tt = strmin->last_sequence_delivered + 1;
/*
 * sctp_flush_reassm_for_str_seq() — during FWD-TSN (PR-SCTP) processing,
 * purge from the reassembly queue every fragment belonging to the given
 * (stream, seq) pair that the peer has told us to skip.
 *
 * Fragments on other streams, or marked SCTP_DATA_UNORDERED (for which
 * the stream sequence number has no meaning), are left alone.  For each
 * tossed fragment the code visibly:
 *   - removes it from asoc->reasmqueue;
 *   - advances tsn_last_delivered (and the partial-delivery-API tracking
 *     fields str_of_pdapi / ssn_of_pdapi / fragment_flags) if the
 *     fragment's TSN is newer;
 *   - decrements size_on_reasm_queue / cnt_on_reasm_queue;
 *   - for ordered data whose SSN is ahead of the stream's
 *     last_sequence_delivered, bumps last_sequence_delivered forward —
 *     the long in-line comment below explains why this is necessary and
 *     what can go wrong if the peer's FWD-TSN omits the last fragment;
 *   - frees the mbuf chain and the chunk itself.
 *
 * NOTE(review): sampled extract — the while-loop header, several braces,
 * `continue`/advance statements and the tail of the final else-if fall
 * in elided gaps; code is left byte-identical, comments only.
 */
5424 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5425 struct sctp_association *asoc,
5426 uint16_t stream, uint16_t seq)
5428 struct sctp_tmit_chunk *chk, *at;
5430 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5431 /* For each one on here see if we need to toss it */
5433 * For now large messages held on the reasmqueue that are
5434 * complete will be tossed too. We could in theory do more
5435 * work to spin through and stop after dumping one msg aka
5436 * seeing the start of a new msg at the head, and call the
5437 * delivery function... to see if it can be delivered... But
5438 * for now we just dump everything on the queue.
5440 chk = TAILQ_FIRST(&asoc->reasmqueue);
5442 at = TAILQ_NEXT(chk, sctp_next);
5444 * Do not toss it if on a different stream or marked
5445 * for unordered delivery in which case the stream
5446 * sequence number has no meaning.
5448 if ((chk->rec.data.stream_number != stream) ||
5449 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5453 if (chk->rec.data.stream_seq == seq) {
5454 /* It needs to be tossed */
5455 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5456 if (compare_with_wrap(chk->rec.data.TSN_seq,
5457 asoc->tsn_last_delivered, MAX_TSN)) {
5458 asoc->tsn_last_delivered =
5459 chk->rec.data.TSN_seq;
5460 asoc->str_of_pdapi =
5461 chk->rec.data.stream_number;
5462 asoc->ssn_of_pdapi =
5463 chk->rec.data.stream_seq;
5464 asoc->fragment_flags =
5465 chk->rec.data.rcv_flags;
5467 asoc->size_on_reasm_queue -= chk->send_size;
5468 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5470 /* Clear up any stream problem */
5471 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5472 SCTP_DATA_UNORDERED &&
5473 (compare_with_wrap(chk->rec.data.stream_seq,
5474 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5477 * We must dump forward this streams
5478 * sequence number if the chunk is
5479 * not unordered that is being
5480 * skipped. There is a chance that
5481 * if the peer does not include the
5482 * last fragment in its FWD-TSN we
5483 * WILL have a problem here since
5484 * you would have a partial chunk in
5485 * queue that may not be
5486 * deliverable. Also if a Partial
5487 * delivery API as started the user
5488 * may get a partial chunk. The next
5489 * read returning a new chunk...
5490 * really ugly but I see no way
5491 * around it! Maybe a notify??
5493 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5494 chk->rec.data.stream_seq;
5497 sctp_m_freem(chk->data);
5500 sctp_free_a_chunk(stcb, chk);
5501 } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5503 * If the stream_seq is > than the purging
5515 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5516 struct sctp_forward_tsn_chunk *fwd,
5517 int *abort_flag, struct mbuf *m, int offset)
5520 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5521 * forward TSN, when the SACK comes back that acknowledges the
5522 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5523 * get quite tricky since we may have sent more data intervening
5524 * and must carefully account for what the SACK says on the nonce
5525 * and any gaps that are reported. This work will NOT be done here,
5526 * but I note it here since it is really related to PR-SCTP and
5530 /* The pr-sctp fwd tsn */
5532 * here we will perform all the data receiver side steps for
5533 * processing FwdTSN, as required in by pr-sctp draft:
5535 * Assume we get FwdTSN(x):
5537 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5538 * others we have 3) examine and update re-ordering queue on
5539 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5540 * report where we are.
5542 struct sctp_association *asoc;
5543 uint32_t new_cum_tsn, tsn, gap;
5544 unsigned int i, fwd_sz, cumack_set_flag, m_size, fnd = 0;
5546 struct sctp_stream_in *strm;
5547 struct sctp_tmit_chunk *chk, *at;
5548 struct sctp_queued_to_read *ctl, *sv;
5550 cumack_set_flag = 0;
5552 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5553 SCTPDBG(SCTP_DEBUG_INDATA1,
5554 "Bad size too small/big fwd-tsn\n");
5557 m_size = (stcb->asoc.mapping_array_size << 3);
5558 /*************************************************************/
5559 /* 1. Here we update local cumTSN and shift the bitmap array */
5560 /*************************************************************/
5561 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5563 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5564 asoc->cumulative_tsn == new_cum_tsn) {
5565 /* Already got there ... */
5568 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5570 asoc->highest_tsn_inside_map = new_cum_tsn;
5573 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_nr_map,
5575 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5578 * now we know the new TSN is more advanced, let's find the actual
5581 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5582 asoc->cumulative_tsn = new_cum_tsn;
5583 if (gap >= m_size) {
5584 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5588 * out of range (of single byte chunks in the rwnd I
5589 * give out). This must be an attacker.
5592 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5593 0, M_DONTWAIT, 1, MT_DATA);
5595 struct sctp_paramhdr *ph;
5598 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5599 (sizeof(uint32_t) * 3);
5600 ph = mtod(oper, struct sctp_paramhdr *);
5601 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5602 ph->param_length = htons(SCTP_BUF_LEN(oper));
5603 ippp = (uint32_t *) (ph + 1);
5604 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5606 *ippp = asoc->highest_tsn_inside_map;
5608 *ippp = new_cum_tsn;
5610 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5611 sctp_abort_an_association(stcb->sctp_ep, stcb,
5612 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5615 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5617 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5618 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5619 asoc->highest_tsn_inside_map = new_cum_tsn;
5621 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5622 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5624 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5625 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5627 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5629 SCTP_TCB_LOCK_ASSERT(stcb);
5630 for (i = 0; i <= gap; i++) {
5631 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, i);
5632 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5633 /* FIX ME add something to set up highest TSN in map */
5635 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5636 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5638 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, MAX_TSN) ||
5639 new_cum_tsn == asoc->highest_tsn_inside_map) {
5640 /* We must back down to see what the new highest is */
5641 for (tsn = new_cum_tsn; (compare_with_wrap(tsn, asoc->mapping_array_base_tsn, MAX_TSN) ||
5642 (tsn == asoc->mapping_array_base_tsn)); tsn--) {
5643 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
5644 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
5645 asoc->highest_tsn_inside_map = tsn;
5651 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
5655 * Now after marking all, slide thing forward but no sack
5658 sctp_slide_mapping_arrays(stcb);
5660 /*************************************************************/
5661 /* 2. Clear up re-assembly queue */
5662 /*************************************************************/
5664 * First service it if pd-api is up, just in case we can progress it
5667 if (asoc->fragmented_delivery_inprogress) {
5668 sctp_service_reassembly(stcb, asoc);
5670 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5671 /* For each one on here see if we need to toss it */
5673 * For now large messages held on the reasmqueue that are
5674 * complete will be tossed too. We could in theory do more
5675 * work to spin through and stop after dumping one msg aka
5676 * seeing the start of a new msg at the head, and call the
5677 * delivery function... to see if it can be delivered... But
5678 * for now we just dump everything on the queue.
5680 chk = TAILQ_FIRST(&asoc->reasmqueue);
5682 at = TAILQ_NEXT(chk, sctp_next);
5683 if ((compare_with_wrap(new_cum_tsn,
5684 chk->rec.data.TSN_seq, MAX_TSN)) ||
5685 (new_cum_tsn == chk->rec.data.TSN_seq)) {
5686 /* It needs to be tossed */
5687 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5688 if (compare_with_wrap(chk->rec.data.TSN_seq,
5689 asoc->tsn_last_delivered, MAX_TSN)) {
5690 asoc->tsn_last_delivered =
5691 chk->rec.data.TSN_seq;
5692 asoc->str_of_pdapi =
5693 chk->rec.data.stream_number;
5694 asoc->ssn_of_pdapi =
5695 chk->rec.data.stream_seq;
5696 asoc->fragment_flags =
5697 chk->rec.data.rcv_flags;
5699 asoc->size_on_reasm_queue -= chk->send_size;
5700 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5702 /* Clear up any stream problem */
5703 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5704 SCTP_DATA_UNORDERED &&
5705 (compare_with_wrap(chk->rec.data.stream_seq,
5706 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5709 * We must dump forward this streams
5710 * sequence number if the chunk is
5711 * not unordered that is being
5712 * skipped. There is a chance that
5713 * if the peer does not include the
5714 * last fragment in its FWD-TSN we
5715 * WILL have a problem here since
5716 * you would have a partial chunk in
5717 * queue that may not be
5718 * deliverable. Also if a Partial
5719 * delivery API as started the user
5720 * may get a partial chunk. The next
5721 * read returning a new chunk...
5722 * really ugly but I see no way
5723 * around it! Maybe a notify??
5725 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5726 chk->rec.data.stream_seq;
5729 sctp_m_freem(chk->data);
5732 sctp_free_a_chunk(stcb, chk);
5735 * Ok we have gone beyond the end of the
5743 /*******************************************************/
5744 /* 3. Update the PR-stream re-ordering queues and fix */
5745 /* delivery issues as needed. */
5746 /*******************************************************/
5747 fwd_sz -= sizeof(*fwd);
5750 unsigned int num_str;
5751 struct sctp_strseq *stseq, strseqbuf;
5753 offset += sizeof(*fwd);
5755 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5756 num_str = fwd_sz / sizeof(struct sctp_strseq);
5757 for (i = 0; i < num_str; i++) {
5760 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5761 sizeof(struct sctp_strseq),
5762 (uint8_t *) & strseqbuf);
5763 offset += sizeof(struct sctp_strseq);
5764 if (stseq == NULL) {
5768 st = ntohs(stseq->stream);
5770 st = ntohs(stseq->sequence);
5771 stseq->sequence = st;
5776 * Ok we now look for the stream/seq on the read
5777 * queue where its not all delivered. If we find it
5778 * we transmute the read entry into a PDI_ABORTED.
5780 if (stseq->stream >= asoc->streamincnt) {
5781 /* screwed up streams, stop! */
5784 if ((asoc->str_of_pdapi == stseq->stream) &&
5785 (asoc->ssn_of_pdapi == stseq->sequence)) {
5787 * If this is the one we were partially
5788 * delivering now then we no longer are.
5789 * Note this will change with the reassembly
5792 asoc->fragmented_delivery_inprogress = 0;
5794 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5795 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5796 if ((ctl->sinfo_stream == stseq->stream) &&
5797 (ctl->sinfo_ssn == stseq->sequence)) {
5798 str_seq = (stseq->stream << 16) | stseq->sequence;
5800 ctl->pdapi_aborted = 1;
5801 sv = stcb->asoc.control_pdapi;
5802 stcb->asoc.control_pdapi = ctl;
5803 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5805 SCTP_PARTIAL_DELIVERY_ABORTED,
5807 SCTP_SO_NOT_LOCKED);
5808 stcb->asoc.control_pdapi = sv;
5810 } else if ((ctl->sinfo_stream == stseq->stream) &&
5811 (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5812 /* We are past our victim SSN */
5816 strm = &asoc->strmin[stseq->stream];
5817 if (compare_with_wrap(stseq->sequence,
5818 strm->last_sequence_delivered, MAX_SEQ)) {
5819 /* Update the sequence number */
5820 strm->last_sequence_delivered =
5823 /* now kick the stream the new way */
5824 /* sa_ignore NO_NULL_CHK */
5825 sctp_kick_prsctp_reorder_queue(stcb, strm);
5827 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5829 if (TAILQ_FIRST(&asoc->reasmqueue)) {
5830 /* now lets kick out and check for more fragmented delivery */
5831 /* sa_ignore NO_NULL_CHK */
5832 sctp_deliver_reasm_check(stcb, &stcb->asoc);