2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Recompute the association's advertised receive window and cache it
 * in asoc->my_rwnd (delegates the math to sctp_calc_rwnd()).
 * NOTE(review): this extract appears to be missing lines (no storage
 * class or braces visible) -- verify against the complete sctp_indata.c.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * Compute the receive window (rwnd) this association can advertise:
 * grant the full socket limit when nothing is buffered anywhere,
 * otherwise start from the actual socket-buffer space and subtract
 * data still held on the reassembly and per-stream queues (plus an
 * MSIZE of mbuf overhead per queued chunk) and the rwnd control-length
 * overhead.
 * NOTE(review): the extract is missing lines (e.g. the body of the
 * NULL-socket check and the final return); confirm against the full file.
 */
65 /* Calculate what the rwnd would be */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a read-queue entry (sctp_queued_to_read)
 * from the fields of an incoming packet. Takes a reference on 'net'
 * (the source address) via atomic_add_int on its ref_count; the entry
 * starts with length 0 and no tail/aux mbufs.
 * Returns NULL when allocation fails.
 * NOTE(review): sinfo_context is set from stcb->asoc.context, not the
 * 'context' parameter -- matches the visible code, but verify intent
 * against the complete source.
 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = stcb->asoc.context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
/*
 * Variant of sctp_build_readq_entry() that pulls all the sinfo fields
 * from an already-queued transmit chunk (chk->rec.data.*) instead of
 * raw packet fields. Takes a reference on chk->whoTo and adopts
 * chk->data as the entry's mbuf chain. Returns NULL on allocation
 * failure.
 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
/*
 * Build an mbuf carrying a cmsghdr (IPPROTO_SCTP level) followed by a
 * copy of 'sinfo' -- the ancillary data handed to the application on
 * receive. Uses SCTP_EXTRCV/sctp_extrcvinfo when the endpoint has the
 * EXT_RCVINFO feature enabled, SCTP_SNDRCV/sctp_sndrcvinfo otherwise,
 * and returns NULL when the user disabled RECVDATAIOEVNT.
 * NOTE(review): lines are missing from this extract (e.g. the
 * cmsg_len assignment and the early returns); verify against the
 * complete source.
 */
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
203 struct sctp_sndrcvinfo *sinfo)
205 struct sctp_sndrcvinfo *outinfo;
209 int use_extended = 0;
211 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
212 /* user does not want the sndrcv ctl */
215 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
217 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
219 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
223 ret = sctp_get_mbuf_for_msg(len,
224 0, M_DONTWAIT, 1, MT_DATA);
230 /* We need a CMSG header followed by the struct */
231 cmh = mtod(ret, struct cmsghdr *);
232 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
233 cmh->cmsg_level = IPPROTO_SCTP;
235 cmh->cmsg_type = SCTP_EXTRCV;
237 memcpy(outinfo, sinfo, len);
239 cmh->cmsg_type = SCTP_SNDRCV;
243 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
/*
 * Same ancillary-data construction as sctp_build_ctl_nchunk(), but the
 * cmsghdr + sndrcvinfo copy is placed in a SCTP_MALLOC'd character
 * buffer (SCTP_M_CMSG) rather than an mbuf. Caller owns/frees the
 * returned buffer.
 * NOTE(review): the allocation-failure branch and the return are not
 * visible in this extract; verify against the complete source.
 */
249 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
251 struct sctp_sndrcvinfo *sinfo)
253 struct sctp_sndrcvinfo *outinfo;
257 int use_extended = 0;
259 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
260 /* user does not want the sndrcv ctl */
263 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
265 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
267 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
269 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
274 /* We need a CMSG header followed by the struct */
275 cmh = (struct cmsghdr *)buf;
276 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
277 cmh->cmsg_level = IPPROTO_SCTP;
279 cmh->cmsg_type = SCTP_EXTRCV;
281 memcpy(outinfo, sinfo, len);
283 cmh->cmsg_type = SCTP_SNDRCV;
/*
 * Move 'tsn' from the renegable mapping array to the non-renegable
 * (nr_mapping_array) one, so a later buffer drain cannot revoke it.
 * No-op unless the sctp_do_drain sysctl is enabled, or when the TSN is
 * already at/behind the cumulative ack. If the TSN was the highest in
 * the renegable map, walk backwards to find the new highest (falling
 * back to mapping_array_base_tsn - 1 when the map is empty).
 * Panics (INVARIANTS-style) if the TSN is not actually present in the
 * mapping array -- that would mean the maps are inconsistent.
 */
292 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
294 uint32_t gap, i, cumackp1;
297 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
300 cumackp1 = asoc->cumulative_tsn + 1;
301 if (SCTP_TSN_GT(cumackp1, tsn)) {
303 * this tsn is behind the cum ack and thus we don't need to
304 * worry about it being moved from one to the other.
308 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
309 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
310 printf("gap:%x tsn:%x\n", gap, tsn);
311 sctp_print_mapping_array(asoc);
313 panic("Things are really messed up now!!");
316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
318 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
319 asoc->highest_tsn_inside_nr_map = tsn;
321 if (tsn == asoc->highest_tsn_inside_map) {
322 /* We must back down to see what the new highest is */
323 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
324 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
325 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
326 asoc->highest_tsn_inside_map = i;
332 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
339 * We are delivering currently from the reassembly queue. We must continue to
340 * deliver until we either: 1) run out of space. 2) run out of sequential
341 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Deliver as much as possible from the reassembly queue to the socket,
 * stopping at a TSN gap, an out-of-order stream sequence, or the
 * LAST_FRAG of a message (per the header comment above). If the socket
 * is gone / association is closing, drain and free the whole queue
 * instead. FIRST_FRAG chunks open a new partial-delivery read-queue
 * entry (control_pdapi); later fragments are appended to it. After a
 * complete message, also flush any now-in-order entries from the
 * stream's inqueue.
 * NOTE(review): many lines (braces, breaks, returns, the INVARIANTS
 * #ifdefs around the panic/SCTP_PRINTF paths) are missing from this
 * extract; do not edit without the complete source.
 */
344 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
346 struct sctp_tmit_chunk *chk, *nchk;
351 struct sctp_queued_to_read *control, *ctl, *nctl;
356 cntDel = stream_no = 0;
357 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
358 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
359 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
360 /* socket above is long gone or going.. */
362 asoc->fragmented_delivery_inprogress = 0;
363 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
364 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
365 asoc->size_on_reasm_queue -= chk->send_size;
366 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
368 * Lose the data pointer, since its in the socket
372 sctp_m_freem(chk->data);
375 /* Now free the address and data */
376 sctp_free_a_chunk(stcb, chk);
377 /* sa_ignore FREED_MEMORY */
381 SCTP_TCB_LOCK_ASSERT(stcb);
382 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
384 /* Can't deliver more :< */
387 stream_no = chk->rec.data.stream_number;
388 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
389 if (nxt_todel != chk->rec.data.stream_seq &&
390 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
392 * Not the next sequence to deliver in its stream OR
397 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
399 control = sctp_build_readq_entry_chk(stcb, chk);
400 if (control == NULL) {
404 /* save it off for our future deliveries */
405 stcb->asoc.control_pdapi = control;
406 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
410 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
411 sctp_add_to_readq(stcb->sctp_ep,
412 stcb, control, &stcb->sctp_socket->so_rcv, end,
413 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
416 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
420 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
421 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
422 stcb->asoc.control_pdapi,
423 chk->data, end, chk->rec.data.TSN_seq,
424 &stcb->sctp_socket->so_rcv)) {
426 * something is very wrong, either
427 * control_pdapi is NULL, or the tail_mbuf
428 * is corrupt, or there is a EOM already on
431 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
435 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
436 panic("This should not happen control_pdapi NULL?");
438 /* if we did not panic, it was a EOM */
439 panic("Bad chunking ??");
441 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
442 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
444 SCTP_PRINTF("Bad chunking ??\n");
445 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
453 /* pull it we did it */
454 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
455 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
456 asoc->fragmented_delivery_inprogress = 0;
457 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
458 asoc->strmin[stream_no].last_sequence_delivered++;
460 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
461 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
463 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
465 * turn the flag back on since we just delivered
468 asoc->fragmented_delivery_inprogress = 1;
470 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
471 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
472 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
473 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
475 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
476 asoc->size_on_reasm_queue -= chk->send_size;
477 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
478 /* free up the chk */
480 sctp_free_a_chunk(stcb, chk);
482 if (asoc->fragmented_delivery_inprogress == 0) {
484 * Now lets see if we can deliver the next one on
487 struct sctp_stream_in *strm;
489 strm = &asoc->strmin[stream_no];
490 nxt_todel = strm->last_sequence_delivered + 1;
491 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
492 /* Deliver more if we can. */
493 if (nxt_todel == ctl->sinfo_ssn) {
494 TAILQ_REMOVE(&strm->inqueue, ctl, next);
495 asoc->size_on_all_streams -= ctl->length;
496 sctp_ucount_decr(asoc->cnt_on_all_streams);
497 strm->last_sequence_delivered++;
498 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
499 sctp_add_to_readq(stcb->sctp_ep, stcb,
501 &stcb->sctp_socket->so_rcv, 1,
502 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
506 nxt_todel = strm->last_sequence_delivered + 1;
514 * Queue the chunk either right into the socket buffer if it is the next one
515 * to go OR put it in the correct place in the delivery queue. If we do
516 * append to the so_buf, keep doing so until we are out of order. One big
517 * question still remains, what to do when the socket buffer is FULL??
/*
 * Accept a fully-reassembled (non-fragmented) message for a stream:
 * deliver it straight to the socket read queue when its SSN is the
 * next expected (then flush any further in-order entries), otherwise
 * insert it into the stream's inqueue sorted by SSN. An SSN at or
 * behind last_sequence_delivered is a protocol violation: the
 * association is aborted with SCTP_CAUSE_PROTOCOL_VIOLATION and
 * *abort_flag is set. A duplicate SSN already queued is silently
 * dropped and its resources freed.
 * NOTE(review): this extract is missing lines (declarations of oper /
 * nxt_todel / ippp, several braces and returns, the *abort_flag
 * assignment); do not edit without the complete source.
 */
520 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
521 struct sctp_queued_to_read *control, int *abort_flag)
524 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
525 * all the data in one stream this could happen quite rapidly. One
526 * could use the TSN to keep track of things, but this scheme breaks
527 * down in the other type of stream useage that could occur. Send a
528 * single msg to stream 0, send 4Billion messages to stream 1, now
529 * send a message to stream 0. You have a situation where the TSN
530 * has wrapped but not in the stream. Is this worth worrying about
531 * or should we just change our queue sort at the bottom to be by
534 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
535 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
536 * assignment this could happen... and I don't see how this would be
537 * a violation. So for now I am undecided an will leave the sort by
538 * SSN alone. Maybe a hybred approach is the answer
541 struct sctp_stream_in *strm;
542 struct sctp_queued_to_read *at;
548 asoc->size_on_all_streams += control->length;
549 sctp_ucount_incr(asoc->cnt_on_all_streams);
550 strm = &asoc->strmin[control->sinfo_stream];
551 nxt_todel = strm->last_sequence_delivered + 1;
552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
553 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
555 SCTPDBG(SCTP_DEBUG_INDATA1,
556 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
557 (uint32_t) control->sinfo_stream,
558 (uint32_t) strm->last_sequence_delivered,
559 (uint32_t) nxt_todel);
560 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
561 /* The incoming sseq is behind where we last delivered? */
562 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
563 control->sinfo_ssn, strm->last_sequence_delivered);
566 * throw it in the stream so it gets cleaned up in
567 * association destruction
569 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
570 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
571 0, M_DONTWAIT, 1, MT_DATA);
573 struct sctp_paramhdr *ph;
576 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
577 (sizeof(uint32_t) * 3);
578 ph = mtod(oper, struct sctp_paramhdr *);
579 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
580 ph->param_length = htons(SCTP_BUF_LEN(oper));
581 ippp = (uint32_t *) (ph + 1);
582 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
584 *ippp = control->sinfo_tsn;
586 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
588 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
589 sctp_abort_an_association(stcb->sctp_ep, stcb,
590 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
596 if (nxt_todel == control->sinfo_ssn) {
597 /* can be delivered right away? */
598 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
599 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
601 /* EY it wont be queued if it could be delivered directly */
603 asoc->size_on_all_streams -= control->length;
604 sctp_ucount_decr(asoc->cnt_on_all_streams);
605 strm->last_sequence_delivered++;
607 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
608 sctp_add_to_readq(stcb->sctp_ep, stcb,
610 &stcb->sctp_socket->so_rcv, 1,
611 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
612 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
614 nxt_todel = strm->last_sequence_delivered + 1;
615 if (nxt_todel == control->sinfo_ssn) {
616 TAILQ_REMOVE(&strm->inqueue, control, next);
617 asoc->size_on_all_streams -= control->length;
618 sctp_ucount_decr(asoc->cnt_on_all_streams);
619 strm->last_sequence_delivered++;
621 * We ignore the return of deliver_data here
622 * since we always can hold the chunk on the
623 * d-queue. And we have a finite number that
624 * can be delivered from the strq.
626 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
627 sctp_log_strm_del(control, NULL,
628 SCTP_STR_LOG_FROM_IMMED_DEL);
630 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
631 sctp_add_to_readq(stcb->sctp_ep, stcb,
633 &stcb->sctp_socket->so_rcv, 1,
634 SCTP_READ_LOCK_NOT_HELD,
643 * Ok, we did not deliver this guy, find the correct place
644 * to put it on the queue.
646 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
649 if (TAILQ_EMPTY(&strm->inqueue)) {
651 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
652 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
654 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
656 TAILQ_FOREACH(at, &strm->inqueue, next) {
657 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
659 * one in queue is bigger than the
660 * new one, insert before this one
662 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
663 sctp_log_strm_del(control, at,
664 SCTP_STR_LOG_FROM_INSERT_MD);
666 TAILQ_INSERT_BEFORE(at, control, next);
668 } else if (at->sinfo_ssn == control->sinfo_ssn) {
670 * Gak, He sent me a duplicate str
674 * foo bar, I guess I will just free
675 * this new guy, should we abort
676 * too? FIX ME MAYBE? Or it COULD be
677 * that the SSN's have wrapped.
678 * Maybe I should compare to TSN
679 * somehow... sigh for now just blow
684 sctp_m_freem(control->data);
685 control->data = NULL;
686 asoc->size_on_all_streams -= control->length;
687 sctp_ucount_decr(asoc->cnt_on_all_streams);
688 if (control->whoFrom) {
689 sctp_free_remote_addr(control->whoFrom);
690 control->whoFrom = NULL;
692 sctp_free_a_readq(stcb, control);
695 if (TAILQ_NEXT(at, next) == NULL) {
697 * We are at the end, insert
700 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
701 sctp_log_strm_del(control, at,
702 SCTP_STR_LOG_FROM_INSERT_TL);
704 TAILQ_INSERT_AFTER(&strm->inqueue,
715 * Returns two things: You get the total size of the deliverable parts of the
716 * first fragmented message on the reassembly queue. And you get a 1 back if
717 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Per the header comment above: sum into *t_size the deliverable bytes
 * of the first fragmented message on the reassembly queue (consecutive
 * TSNs starting from a FIRST_FRAG), and report whether the message is
 * complete (a LAST_FRAG was reached) or still partial.
 * NOTE(review): return statements, the tsn increment, and the 'tsn'
 * declaration are not visible in this extract; verify against the
 * complete source.
 */
720 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
722 struct sctp_tmit_chunk *chk;
726 chk = TAILQ_FIRST(&asoc->reasmqueue);
728 /* nothing on the queue */
731 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
732 /* Not a first on the queue */
735 tsn = chk->rec.data.TSN_seq;
736 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
737 if (tsn != chk->rec.data.TSN_seq) {
740 *t_size += chk->send_size;
741 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Decide whether reassembly delivery can run. If no fragmented
 * delivery is in progress: start one only when the queue head is a
 * FIRST_FRAG that is in stream order (or unordered) AND either the
 * whole message is present or its size reaches the partial-delivery
 * point (min of socket rcv limit and the endpoint's
 * partial_delivery_point). Starting sets up the pdapi bookkeeping
 * (tsn_last_delivered backed down by one, str/ssn/ppid/flags of the
 * message) before calling sctp_service_reassembly(). If a fragmented
 * delivery IS in progress, just keep servicing it, and re-check once
 * it finishes. When the queue is empty the reasm size/count are reset
 * to zero.
 * NOTE(review): braces, returns, and the 'nxt_todel' assignment target
 * are missing from this extract; verify against the complete source.
 */
750 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
752 struct sctp_tmit_chunk *chk;
754 uint32_t tsize, pd_point;
757 chk = TAILQ_FIRST(&asoc->reasmqueue);
760 asoc->size_on_reasm_queue = 0;
761 asoc->cnt_on_reasm_queue = 0;
764 if (asoc->fragmented_delivery_inprogress == 0) {
766 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
767 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
768 (nxt_todel == chk->rec.data.stream_seq ||
769 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
771 * Yep the first one is here and its ok to deliver
774 if (stcb->sctp_socket) {
775 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
776 stcb->sctp_ep->partial_delivery_point);
778 pd_point = stcb->sctp_ep->partial_delivery_point;
780 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
783 * Yes, we setup to start reception, by
784 * backing down the TSN just in case we
785 * can't deliver. If we
787 asoc->fragmented_delivery_inprogress = 1;
788 asoc->tsn_last_delivered =
789 chk->rec.data.TSN_seq - 1;
791 chk->rec.data.stream_number;
792 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
793 asoc->pdapi_ppid = chk->rec.data.payloadtype;
794 asoc->fragment_flags = chk->rec.data.rcv_flags;
795 sctp_service_reassembly(stcb, asoc);
800 * Service re-assembly will deliver stream data queued at
801 * the end of fragmented delivery.. but it wont know to go
802 * back and call itself again... we do that here with the
805 sctp_service_reassembly(stcb, asoc);
806 if (asoc->fragmented_delivery_inprogress == 0) {
808 * finished our Fragmented delivery, could be more
817 * Dump onto the re-assembly queue, in its proper place. After dumping on the
818 * queue, see if anthing can be delivered. If so pull it off (or as much as
819 * we can. If we run out of space then we must dump what we can and set the
820 * appropriate flag to say we queued what we could.
823 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
824 struct sctp_tmit_chunk *chk, int *abort_flag)
827 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
829 struct sctp_tmit_chunk *at, *prev, *next;
832 cum_ackp1 = asoc->tsn_last_delivered + 1;
833 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
834 /* This is the first one on the queue */
835 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
837 * we do not check for delivery of anything when only one
840 asoc->size_on_reasm_queue = chk->send_size;
841 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
842 if (chk->rec.data.TSN_seq == cum_ackp1) {
843 if (asoc->fragmented_delivery_inprogress == 0 &&
844 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
845 SCTP_DATA_FIRST_FRAG) {
847 * An empty queue, no delivery inprogress,
848 * we hit the next one and it does NOT have
849 * a FIRST fragment mark.
851 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
852 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
853 0, M_DONTWAIT, 1, MT_DATA);
856 struct sctp_paramhdr *ph;
860 sizeof(struct sctp_paramhdr) +
861 (sizeof(uint32_t) * 3);
862 ph = mtod(oper, struct sctp_paramhdr *);
864 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
865 ph->param_length = htons(SCTP_BUF_LEN(oper));
866 ippp = (uint32_t *) (ph + 1);
867 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
869 *ippp = chk->rec.data.TSN_seq;
871 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
874 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
875 sctp_abort_an_association(stcb->sctp_ep, stcb,
876 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
878 } else if (asoc->fragmented_delivery_inprogress &&
879 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
881 * We are doing a partial delivery and the
882 * NEXT chunk MUST be either the LAST or
883 * MIDDLE fragment NOT a FIRST
885 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
886 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
887 0, M_DONTWAIT, 1, MT_DATA);
889 struct sctp_paramhdr *ph;
893 sizeof(struct sctp_paramhdr) +
894 (3 * sizeof(uint32_t));
895 ph = mtod(oper, struct sctp_paramhdr *);
897 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
898 ph->param_length = htons(SCTP_BUF_LEN(oper));
899 ippp = (uint32_t *) (ph + 1);
900 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
902 *ippp = chk->rec.data.TSN_seq;
904 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
906 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
907 sctp_abort_an_association(stcb->sctp_ep, stcb,
908 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
910 } else if (asoc->fragmented_delivery_inprogress) {
912 * Here we are ok with a MIDDLE or LAST
915 if (chk->rec.data.stream_number !=
916 asoc->str_of_pdapi) {
917 /* Got to be the right STR No */
918 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
919 chk->rec.data.stream_number,
921 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
922 0, M_DONTWAIT, 1, MT_DATA);
924 struct sctp_paramhdr *ph;
928 sizeof(struct sctp_paramhdr) +
929 (sizeof(uint32_t) * 3);
931 struct sctp_paramhdr *);
933 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
935 htons(SCTP_BUF_LEN(oper));
936 ippp = (uint32_t *) (ph + 1);
937 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
939 *ippp = chk->rec.data.TSN_seq;
941 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
943 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
944 sctp_abort_an_association(stcb->sctp_ep,
945 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
947 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
948 SCTP_DATA_UNORDERED &&
949 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
950 /* Got to be the right STR Seq */
951 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
952 chk->rec.data.stream_seq,
954 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
955 0, M_DONTWAIT, 1, MT_DATA);
957 struct sctp_paramhdr *ph;
961 sizeof(struct sctp_paramhdr) +
962 (3 * sizeof(uint32_t));
964 struct sctp_paramhdr *);
966 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
968 htons(SCTP_BUF_LEN(oper));
969 ippp = (uint32_t *) (ph + 1);
970 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
972 *ippp = chk->rec.data.TSN_seq;
974 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
977 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
978 sctp_abort_an_association(stcb->sctp_ep,
979 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
987 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
988 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
990 * one in queue is bigger than the new one, insert
994 asoc->size_on_reasm_queue += chk->send_size;
995 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
997 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
999 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1000 /* Gak, He sent me a duplicate str seq number */
1002 * foo bar, I guess I will just free this new guy,
1003 * should we abort too? FIX ME MAYBE? Or it COULD be
1004 * that the SSN's have wrapped. Maybe I should
1005 * compare to TSN somehow... sigh for now just blow
1009 sctp_m_freem(chk->data);
1012 sctp_free_a_chunk(stcb, chk);
1015 last_flags = at->rec.data.rcv_flags;
1016 last_tsn = at->rec.data.TSN_seq;
1018 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1020 * We are at the end, insert it after this
1023 /* check it first */
1024 asoc->size_on_reasm_queue += chk->send_size;
1025 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1026 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1031 /* Now the audits */
1033 prev_tsn = chk->rec.data.TSN_seq - 1;
1034 if (prev_tsn == prev->rec.data.TSN_seq) {
1036 * Ok the one I am dropping onto the end is the
1037 * NEXT. A bit of valdiation here.
1039 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1040 SCTP_DATA_FIRST_FRAG ||
1041 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1042 SCTP_DATA_MIDDLE_FRAG) {
1044 * Insert chk MUST be a MIDDLE or LAST
1047 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1048 SCTP_DATA_FIRST_FRAG) {
1049 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1050 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1051 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1052 0, M_DONTWAIT, 1, MT_DATA);
1054 struct sctp_paramhdr *ph;
1057 SCTP_BUF_LEN(oper) =
1058 sizeof(struct sctp_paramhdr) +
1059 (3 * sizeof(uint32_t));
1061 struct sctp_paramhdr *);
1063 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1065 htons(SCTP_BUF_LEN(oper));
1066 ippp = (uint32_t *) (ph + 1);
1067 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1069 *ippp = chk->rec.data.TSN_seq;
1071 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1074 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1075 sctp_abort_an_association(stcb->sctp_ep,
1076 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1080 if (chk->rec.data.stream_number !=
1081 prev->rec.data.stream_number) {
1083 * Huh, need the correct STR here,
1084 * they must be the same.
1086 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1087 chk->rec.data.stream_number,
1088 prev->rec.data.stream_number);
1089 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1090 0, M_DONTWAIT, 1, MT_DATA);
1092 struct sctp_paramhdr *ph;
1095 SCTP_BUF_LEN(oper) =
1096 sizeof(struct sctp_paramhdr) +
1097 (3 * sizeof(uint32_t));
1099 struct sctp_paramhdr *);
1101 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1103 htons(SCTP_BUF_LEN(oper));
1104 ippp = (uint32_t *) (ph + 1);
1105 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1107 *ippp = chk->rec.data.TSN_seq;
1109 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1111 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1112 sctp_abort_an_association(stcb->sctp_ep,
1113 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1118 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1119 chk->rec.data.stream_seq !=
1120 prev->rec.data.stream_seq) {
1122 * Huh, need the correct STR here,
1123 * they must be the same.
1125 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1126 chk->rec.data.stream_seq,
1127 prev->rec.data.stream_seq);
1128 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1129 0, M_DONTWAIT, 1, MT_DATA);
1131 struct sctp_paramhdr *ph;
1134 SCTP_BUF_LEN(oper) =
1135 sizeof(struct sctp_paramhdr) +
1136 (3 * sizeof(uint32_t));
1138 struct sctp_paramhdr *);
1140 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1142 htons(SCTP_BUF_LEN(oper));
1143 ippp = (uint32_t *) (ph + 1);
1144 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1146 *ippp = chk->rec.data.TSN_seq;
1148 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1150 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1151 sctp_abort_an_association(stcb->sctp_ep,
1152 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1157 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1158 SCTP_DATA_LAST_FRAG) {
1159 /* Insert chk MUST be a FIRST */
1160 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1161 SCTP_DATA_FIRST_FRAG) {
1162 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1163 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1164 0, M_DONTWAIT, 1, MT_DATA);
1166 struct sctp_paramhdr *ph;
1169 SCTP_BUF_LEN(oper) =
1170 sizeof(struct sctp_paramhdr) +
1171 (3 * sizeof(uint32_t));
1173 struct sctp_paramhdr *);
1175 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1177 htons(SCTP_BUF_LEN(oper));
1178 ippp = (uint32_t *) (ph + 1);
1179 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1181 *ippp = chk->rec.data.TSN_seq;
1183 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1186 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1187 sctp_abort_an_association(stcb->sctp_ep,
1188 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1197 post_tsn = chk->rec.data.TSN_seq + 1;
1198 if (post_tsn == next->rec.data.TSN_seq) {
1200 * Ok the one I am inserting ahead of is my NEXT
1201 * one. A bit of valdiation here.
1203 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1204 /* Insert chk MUST be a last fragment */
1205 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1206 != SCTP_DATA_LAST_FRAG) {
1207 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1208 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1209 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1210 0, M_DONTWAIT, 1, MT_DATA);
1212 struct sctp_paramhdr *ph;
1215 SCTP_BUF_LEN(oper) =
1216 sizeof(struct sctp_paramhdr) +
1217 (3 * sizeof(uint32_t));
1219 struct sctp_paramhdr *);
1221 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1223 htons(SCTP_BUF_LEN(oper));
1224 ippp = (uint32_t *) (ph + 1);
1225 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1227 *ippp = chk->rec.data.TSN_seq;
1229 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1231 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1232 sctp_abort_an_association(stcb->sctp_ep,
1233 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1238 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1239 SCTP_DATA_MIDDLE_FRAG ||
1240 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1241 SCTP_DATA_LAST_FRAG) {
1243 * Insert chk CAN be MIDDLE or FIRST NOT
1246 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1247 SCTP_DATA_LAST_FRAG) {
1248 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1249 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1250 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1251 0, M_DONTWAIT, 1, MT_DATA);
1253 struct sctp_paramhdr *ph;
1256 SCTP_BUF_LEN(oper) =
1257 sizeof(struct sctp_paramhdr) +
1258 (3 * sizeof(uint32_t));
1260 struct sctp_paramhdr *);
1262 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1264 htons(SCTP_BUF_LEN(oper));
1265 ippp = (uint32_t *) (ph + 1);
1266 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1268 *ippp = chk->rec.data.TSN_seq;
1270 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1273 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1274 sctp_abort_an_association(stcb->sctp_ep,
1275 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1280 if (chk->rec.data.stream_number !=
1281 next->rec.data.stream_number) {
1283 * Huh, need the correct STR here,
1284 * they must be the same.
1286 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1287 chk->rec.data.stream_number,
1288 next->rec.data.stream_number);
1289 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1290 0, M_DONTWAIT, 1, MT_DATA);
1292 struct sctp_paramhdr *ph;
1295 SCTP_BUF_LEN(oper) =
1296 sizeof(struct sctp_paramhdr) +
1297 (3 * sizeof(uint32_t));
1299 struct sctp_paramhdr *);
1301 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1303 htons(SCTP_BUF_LEN(oper));
1304 ippp = (uint32_t *) (ph + 1);
1305 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1307 *ippp = chk->rec.data.TSN_seq;
1309 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1312 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1313 sctp_abort_an_association(stcb->sctp_ep,
1314 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1319 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1320 chk->rec.data.stream_seq !=
1321 next->rec.data.stream_seq) {
1323 * Huh, need the correct STR here,
1324 * they must be the same.
1326 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1327 chk->rec.data.stream_seq,
1328 next->rec.data.stream_seq);
1329 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1330 0, M_DONTWAIT, 1, MT_DATA);
1332 struct sctp_paramhdr *ph;
1335 SCTP_BUF_LEN(oper) =
1336 sizeof(struct sctp_paramhdr) +
1337 (3 * sizeof(uint32_t));
1339 struct sctp_paramhdr *);
1341 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1343 htons(SCTP_BUF_LEN(oper));
1344 ippp = (uint32_t *) (ph + 1);
1345 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1347 *ippp = chk->rec.data.TSN_seq;
1349 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1351 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1352 sctp_abort_an_association(stcb->sctp_ep,
1353 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1361 /* Do we need to do some delivery? check */
1362 sctp_deliver_reasm_check(stcb, asoc);
1366 * This is an unfortunate routine. It checks to make sure an evil guy is not
1367 * stuffing us full of bad packet fragments. A broken peer could also do this,
1368 * but this is doubtful. It is too bad I must worry about evil crackers, sigh.
/*
 * sctp_does_tsn_belong_to_reasm() -- walk the association's reassembly
 * queue and decide whether a chunk carrying TSN_seq must itself go onto
 * that queue: it does when it sits directly adjacent (TSN +/- 1) to a
 * fragment already queued whose B/E fragment bits say the message is not
 * yet complete at that edge.  This defends against a peer feeding us
 * inconsistent fragment sequences.
 *
 * NOTE(review): this extract elides several original source lines (see
 * the gaps in the embedded line numbering); the function's return
 * statements and closing brace are among the elided text, so only the
 * visible skeleton is annotated here.
 */
1372 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1375 struct sctp_tmit_chunk *at;
/* reasmqueue is iterated in order; look for a TSN neighbour of TSN_seq. */
1378 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1379 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
/* 'at' is older than the new chunk: is the new chunk its direct successor? */
1380 /* is it one bigger? */
1381 tsn_est = at->rec.data.TSN_seq + 1;
1382 if (tsn_est == TSN_seq) {
/*
 * The new chunk immediately follows 'at'.  Unless 'at' is a
 * LAST fragment, the new chunk belongs to the same message and
 * hence to the reassembly queue.
 */
1383 /* yep. It better be a last then */
1384 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1385 SCTP_DATA_LAST_FRAG) {
1387 * Ok this guy belongs next to a guy
1388 * that is NOT last, it should be a
1389 * middle/last, not a complete
1395 * This guy is ok since its a LAST
1396 * and the new chunk is a fully
1397 * self- contained one.
1402 } else if (TSN_seq == at->rec.data.TSN_seq) {
/* Exact TSN already on the queue: duplicate. */
1403 /* Software error since I have a dup? */
/* 'at' is beyond the new chunk: check adjacency in the other direction. */
1407 * Ok, 'at' is larger than new chunk but does it
1408 * need to be right before it.
1410 tsn_est = TSN_seq + 1;
1411 if (tsn_est == at->rec.data.TSN_seq) {
/*
 * The new chunk immediately precedes 'at'.  Unless 'at' is a
 * FIRST fragment, the new chunk belongs in reassembly.
 */
1412 /* Yep, It better be a first */
1413 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1414 SCTP_DATA_FIRST_FRAG) {
1427 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1428 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1429 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1430 int *break_flag, int last_chunk)
1432 /* Process a data chunk */
1433 /* struct sctp_tmit_chunk *chk; */
1434 struct sctp_tmit_chunk *chk;
1438 int need_reasm_check = 0;
1439 uint16_t strmno, strmseq;
1441 struct sctp_queued_to_read *control;
1443 uint32_t protocol_id;
1444 uint8_t chunk_flags;
1445 struct sctp_stream_reset_list *liste;
1448 tsn = ntohl(ch->dp.tsn);
1449 chunk_flags = ch->ch.chunk_flags;
1450 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1451 asoc->send_sack = 1;
1453 protocol_id = ch->dp.protocol_id;
1454 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1456 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1461 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1462 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1463 /* It is a duplicate */
1464 SCTP_STAT_INCR(sctps_recvdupdata);
1465 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1466 /* Record a dup for the next outbound sack */
1467 asoc->dup_tsns[asoc->numduptsns] = tsn;
1470 asoc->send_sack = 1;
1473 /* Calculate the number of TSN's between the base and this TSN */
1474 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1475 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1476 /* Can't hold the bit in the mapping at max array, toss it */
1479 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1480 SCTP_TCB_LOCK_ASSERT(stcb);
1481 if (sctp_expand_mapping_array(asoc, gap)) {
1482 /* Can't expand, drop it */
1486 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1489 /* See if we have received this one already */
1490 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1491 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1492 SCTP_STAT_INCR(sctps_recvdupdata);
1493 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1494 /* Record a dup for the next outbound sack */
1495 asoc->dup_tsns[asoc->numduptsns] = tsn;
1498 asoc->send_sack = 1;
1502 * Check to see about the GONE flag, duplicates would cause a sack
1503 * to be sent up above
1505 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1506 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1507 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1510 * wait a minute, this guy is gone, there is no longer a
1511 * receiver. Send peer an ABORT!
1513 struct mbuf *op_err;
1515 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1516 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1521 * Now before going further we see if there is room. If NOT then we
1522 * MAY let one through only IF this TSN is the one we are waiting
1523 * for on a partial delivery API.
1526 /* now do the tests */
1527 if (((asoc->cnt_on_all_streams +
1528 asoc->cnt_on_reasm_queue +
1529 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1530 (((int)asoc->my_rwnd) <= 0)) {
1532 * When we have NO room in the rwnd we check to make sure
1533 * the reader is doing its job...
1535 if (stcb->sctp_socket->so_rcv.sb_cc) {
1536 /* some to read, wake-up */
1537 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1540 so = SCTP_INP_SO(stcb->sctp_ep);
1541 atomic_add_int(&stcb->asoc.refcnt, 1);
1542 SCTP_TCB_UNLOCK(stcb);
1543 SCTP_SOCKET_LOCK(so, 1);
1544 SCTP_TCB_LOCK(stcb);
1545 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1546 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1547 /* assoc was freed while we were unlocked */
1548 SCTP_SOCKET_UNLOCK(so, 1);
1552 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1553 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1554 SCTP_SOCKET_UNLOCK(so, 1);
1557 /* now is it in the mapping array of what we have accepted? */
1558 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1559 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1560 /* Nope not in the valid range dump it */
1561 sctp_set_rwnd(stcb, asoc);
1562 if ((asoc->cnt_on_all_streams +
1563 asoc->cnt_on_reasm_queue +
1564 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1565 SCTP_STAT_INCR(sctps_datadropchklmt);
1567 SCTP_STAT_INCR(sctps_datadroprwnd);
1574 strmno = ntohs(ch->dp.stream_id);
1575 if (strmno >= asoc->streamincnt) {
1576 struct sctp_paramhdr *phdr;
1579 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1580 0, M_DONTWAIT, 1, MT_DATA);
1582 /* add some space up front so prepend will work well */
1583 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1584 phdr = mtod(mb, struct sctp_paramhdr *);
1586 * Error causes are just param's and this one has
1587 * two back to back phdr, one with the error type
1588 * and size, the other with the streamid and a rsvd
1590 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1591 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1592 phdr->param_length =
1593 htons(sizeof(struct sctp_paramhdr) * 2);
1595 /* We insert the stream in the type field */
1596 phdr->param_type = ch->dp.stream_id;
1597 /* And set the length to 0 for the rsvd field */
1598 phdr->param_length = 0;
1599 sctp_queue_op_err(stcb, mb);
1601 SCTP_STAT_INCR(sctps_badsid);
1602 SCTP_TCB_LOCK_ASSERT(stcb);
1603 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1604 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1605 asoc->highest_tsn_inside_nr_map = tsn;
1607 if (tsn == (asoc->cumulative_tsn + 1)) {
1608 /* Update cum-ack */
1609 asoc->cumulative_tsn = tsn;
1614 * Before we continue lets validate that we are not being fooled by
1615 * an evil attacker. We can only have 4k chunks based on our TSN
1616 * spread allowed by the mapping array 512 * 8 bits, so there is no
1617 * way our stream sequence numbers could have wrapped. We of course
1618 * only validate the FIRST fragment so the bit must be set.
1620 strmseq = ntohs(ch->dp.stream_sequence);
1621 #ifdef SCTP_ASOCLOG_OF_TSNS
1622 SCTP_TCB_LOCK_ASSERT(stcb);
1623 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1624 asoc->tsn_in_at = 0;
1625 asoc->tsn_in_wrapped = 1;
1627 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1628 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1629 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1630 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1631 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1632 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1633 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1634 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1637 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1638 (TAILQ_EMPTY(&asoc->resetHead)) &&
1639 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1640 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1641 /* The incoming sseq is behind where we last delivered? */
1642 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1643 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1644 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1645 0, M_DONTWAIT, 1, MT_DATA);
1647 struct sctp_paramhdr *ph;
1650 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1651 (3 * sizeof(uint32_t));
1652 ph = mtod(oper, struct sctp_paramhdr *);
1653 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1654 ph->param_length = htons(SCTP_BUF_LEN(oper));
1655 ippp = (uint32_t *) (ph + 1);
1656 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1660 *ippp = ((strmno << 16) | strmseq);
1663 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1664 sctp_abort_an_association(stcb->sctp_ep, stcb,
1665 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1669 /************************************
1670 * From here down we may find ch-> invalid
1671 * so its a good idea NOT to use it.
1672 *************************************/
1674 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1675 if (last_chunk == 0) {
1676 dmbuf = SCTP_M_COPYM(*m,
1677 (offset + sizeof(struct sctp_data_chunk)),
1678 the_len, M_DONTWAIT);
1679 #ifdef SCTP_MBUF_LOGGING
1680 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1685 if (SCTP_BUF_IS_EXTENDED(mat)) {
1686 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1688 mat = SCTP_BUF_NEXT(mat);
1693 /* We can steal the last chunk */
1697 /* lop off the top part */
1698 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1699 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1700 l_len = SCTP_BUF_LEN(dmbuf);
1703 * need to count up the size hopefully does not hit
1711 l_len += SCTP_BUF_LEN(lat);
1712 lat = SCTP_BUF_NEXT(lat);
1715 if (l_len > the_len) {
1716 /* Trim the end round bytes off too */
1717 m_adj(dmbuf, -(l_len - the_len));
1720 if (dmbuf == NULL) {
1721 SCTP_STAT_INCR(sctps_nomem);
1724 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1725 asoc->fragmented_delivery_inprogress == 0 &&
1726 TAILQ_EMPTY(&asoc->resetHead) &&
1728 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1729 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1730 /* Candidate for express delivery */
1732 * Its not fragmented, No PD-API is up, Nothing in the
1733 * delivery queue, Its un-ordered OR ordered and the next to
1734 * deliver AND nothing else is stuck on the stream queue,
1735 * And there is room for it in the socket buffer. Lets just
1736 * stuff it up the buffer....
1739 /* It would be nice to avoid this copy if we could :< */
1740 sctp_alloc_a_readq(stcb, control);
1741 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1747 if (control == NULL) {
1748 goto failed_express_del;
1750 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1751 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1752 asoc->highest_tsn_inside_nr_map = tsn;
1754 sctp_add_to_readq(stcb->sctp_ep, stcb,
1755 control, &stcb->sctp_socket->so_rcv,
1756 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1758 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1759 /* for ordered, bump what we delivered */
1760 asoc->strmin[strmno].last_sequence_delivered++;
1762 SCTP_STAT_INCR(sctps_recvexpress);
1763 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1764 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1765 SCTP_STR_LOG_FROM_EXPRS_DEL);
1769 goto finish_express_del;
1772 /* If we reach here this is a new chunk */
1775 /* Express for fragmented delivery? */
1776 if ((asoc->fragmented_delivery_inprogress) &&
1777 (stcb->asoc.control_pdapi) &&
1778 (asoc->str_of_pdapi == strmno) &&
1779 (asoc->ssn_of_pdapi == strmseq)
1781 control = stcb->asoc.control_pdapi;
1782 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1783 /* Can't be another first? */
1784 goto failed_pdapi_express_del;
1786 if (tsn == (control->sinfo_tsn + 1)) {
1787 /* Yep, we can add it on */
1791 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1794 cumack = asoc->cumulative_tsn;
1795 if ((cumack + 1) == tsn)
1798 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1800 &stcb->sctp_socket->so_rcv)) {
1801 SCTP_PRINTF("Append fails end:%d\n", end);
1802 goto failed_pdapi_express_del;
1804 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1805 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1806 asoc->highest_tsn_inside_nr_map = tsn;
1808 SCTP_STAT_INCR(sctps_recvexpressm);
1809 control->sinfo_tsn = tsn;
1810 asoc->tsn_last_delivered = tsn;
1811 asoc->fragment_flags = chunk_flags;
1812 asoc->tsn_of_pdapi_last_delivered = tsn;
1813 asoc->last_flags_delivered = chunk_flags;
1814 asoc->last_strm_seq_delivered = strmseq;
1815 asoc->last_strm_no_delivered = strmno;
1817 /* clean up the flags and such */
1818 asoc->fragmented_delivery_inprogress = 0;
1819 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1820 asoc->strmin[strmno].last_sequence_delivered++;
1822 stcb->asoc.control_pdapi = NULL;
1823 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1825 * There could be another message
1828 need_reasm_check = 1;
1832 goto finish_express_del;
1835 failed_pdapi_express_del:
1837 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1838 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1839 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1840 asoc->highest_tsn_inside_nr_map = tsn;
1843 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1844 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1845 asoc->highest_tsn_inside_map = tsn;
1848 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1849 sctp_alloc_a_chunk(stcb, chk);
1851 /* No memory so we drop the chunk */
1852 SCTP_STAT_INCR(sctps_nomem);
1853 if (last_chunk == 0) {
1854 /* we copied it, free the copy */
1855 sctp_m_freem(dmbuf);
1859 chk->rec.data.TSN_seq = tsn;
1860 chk->no_fr_allowed = 0;
1861 chk->rec.data.stream_seq = strmseq;
1862 chk->rec.data.stream_number = strmno;
1863 chk->rec.data.payloadtype = protocol_id;
1864 chk->rec.data.context = stcb->asoc.context;
1865 chk->rec.data.doing_fast_retransmit = 0;
1866 chk->rec.data.rcv_flags = chunk_flags;
1868 chk->send_size = the_len;
1870 atomic_add_int(&net->ref_count, 1);
1873 sctp_alloc_a_readq(stcb, control);
1874 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1880 if (control == NULL) {
1881 /* No memory so we drop the chunk */
1882 SCTP_STAT_INCR(sctps_nomem);
1883 if (last_chunk == 0) {
1884 /* we copied it, free the copy */
1885 sctp_m_freem(dmbuf);
1889 control->length = the_len;
1892 /* Mark it as received */
1893 /* Now queue it where it belongs */
1894 if (control != NULL) {
1895 /* First a sanity check */
1896 if (asoc->fragmented_delivery_inprogress) {
1898 * Ok, we have a fragmented delivery in progress if
1899 * this chunk is next to deliver OR belongs in our
1900 * view to the reassembly, the peer is evil or
1903 uint32_t estimate_tsn;
1905 estimate_tsn = asoc->tsn_last_delivered + 1;
1906 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1907 (estimate_tsn == control->sinfo_tsn)) {
1908 /* Evil/Broke peer */
1909 sctp_m_freem(control->data);
1910 control->data = NULL;
1911 if (control->whoFrom) {
1912 sctp_free_remote_addr(control->whoFrom);
1913 control->whoFrom = NULL;
1915 sctp_free_a_readq(stcb, control);
1916 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1917 0, M_DONTWAIT, 1, MT_DATA);
1919 struct sctp_paramhdr *ph;
1922 SCTP_BUF_LEN(oper) =
1923 sizeof(struct sctp_paramhdr) +
1924 (3 * sizeof(uint32_t));
1925 ph = mtod(oper, struct sctp_paramhdr *);
1927 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1928 ph->param_length = htons(SCTP_BUF_LEN(oper));
1929 ippp = (uint32_t *) (ph + 1);
1930 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1934 *ippp = ((strmno << 16) | strmseq);
1936 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1937 sctp_abort_an_association(stcb->sctp_ep, stcb,
1938 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1943 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1944 sctp_m_freem(control->data);
1945 control->data = NULL;
1946 if (control->whoFrom) {
1947 sctp_free_remote_addr(control->whoFrom);
1948 control->whoFrom = NULL;
1950 sctp_free_a_readq(stcb, control);
1952 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1953 0, M_DONTWAIT, 1, MT_DATA);
1955 struct sctp_paramhdr *ph;
1958 SCTP_BUF_LEN(oper) =
1959 sizeof(struct sctp_paramhdr) +
1960 (3 * sizeof(uint32_t));
1962 struct sctp_paramhdr *);
1964 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1966 htons(SCTP_BUF_LEN(oper));
1967 ippp = (uint32_t *) (ph + 1);
1968 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1972 *ippp = ((strmno << 16) | strmseq);
1974 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1975 sctp_abort_an_association(stcb->sctp_ep,
1976 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1983 /* No PDAPI running */
1984 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1986 * Reassembly queue is NOT empty validate
1987 * that this tsn does not need to be in
1988 * reasembly queue. If it does then our peer
1989 * is broken or evil.
1991 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1992 sctp_m_freem(control->data);
1993 control->data = NULL;
1994 if (control->whoFrom) {
1995 sctp_free_remote_addr(control->whoFrom);
1996 control->whoFrom = NULL;
1998 sctp_free_a_readq(stcb, control);
1999 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2000 0, M_DONTWAIT, 1, MT_DATA);
2002 struct sctp_paramhdr *ph;
2005 SCTP_BUF_LEN(oper) =
2006 sizeof(struct sctp_paramhdr) +
2007 (3 * sizeof(uint32_t));
2009 struct sctp_paramhdr *);
2011 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2013 htons(SCTP_BUF_LEN(oper));
2014 ippp = (uint32_t *) (ph + 1);
2015 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2019 *ippp = ((strmno << 16) | strmseq);
2021 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2022 sctp_abort_an_association(stcb->sctp_ep,
2023 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2030 /* ok, if we reach here we have passed the sanity checks */
2031 if (chunk_flags & SCTP_DATA_UNORDERED) {
2032 /* queue directly into socket buffer */
2033 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2034 sctp_add_to_readq(stcb->sctp_ep, stcb,
2036 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2039 * Special check for when streams are resetting. We
2040 * could be more smart about this and check the
2041 * actual stream to see if it is not being reset..
2042 * that way we would not create a HOLB when amongst
2043 * streams being reset and those not being reset.
2045 * We take complete messages that have a stream reset
2046 * intervening (aka the TSN is after where our
2047 * cum-ack needs to be) off and put them on a
2048 * pending_reply_queue. The reassembly ones we do
2049 * not have to worry about since they are all sorted
2050 * and proceessed by TSN order. It is only the
2051 * singletons I must worry about.
2053 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2054 SCTP_TSN_GT(tsn, liste->tsn)) {
2056 * yep its past where we need to reset... go
2057 * ahead and queue it.
2059 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2061 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2063 struct sctp_queued_to_read *ctlOn,
2065 unsigned char inserted = 0;
2067 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2068 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2072 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2077 if (inserted == 0) {
2079 * must be put at end, use
2080 * prevP (all setup from
2081 * loop) to setup nextP.
2083 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2087 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2094 /* Into the re-assembly queue */
2095 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2098 * the assoc is now gone and chk was put onto the
2099 * reasm queue, which has all been freed.
2106 if (tsn == (asoc->cumulative_tsn + 1)) {
2107 /* Update cum-ack */
2108 asoc->cumulative_tsn = tsn;
2114 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2116 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2118 SCTP_STAT_INCR(sctps_recvdata);
2119 /* Set it present please */
2120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2121 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2123 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2124 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2125 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2127 /* check the special flag for stream resets */
2128 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2129 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2131 * we have finished working through the backlogged TSN's now
2132 * time to reset streams. 1: call reset function. 2: free
2133 * pending_reply space 3: distribute any chunks in
2134 * pending_reply_queue.
2136 struct sctp_queued_to_read *ctl, *nctl;
2138 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2139 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2140 SCTP_FREE(liste, SCTP_M_STRESET);
2141 /* sa_ignore FREED_MEMORY */
2142 liste = TAILQ_FIRST(&asoc->resetHead);
2143 if (TAILQ_EMPTY(&asoc->resetHead)) {
2144 /* All can be removed */
2145 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2146 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2147 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2153 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2154 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2158 * if ctl->sinfo_tsn is <= liste->tsn we can
2159 * process it which is the NOT of
2160 * ctl->sinfo_tsn > liste->tsn
2162 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2163 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2170 * Now service re-assembly to pick up anything that has been
2171 * held on reassembly queue?
2173 sctp_deliver_reasm_check(stcb, asoc);
2174 need_reasm_check = 0;
2176 if (need_reasm_check) {
2177 /* Another one waits ? */
2178 sctp_deliver_reasm_check(stcb, asoc);
/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits at the low
 * end of the byte value v: tab[0x01] = 1, tab[0x03] = 2, tab[0x0F] = 4,
 * tab[0xFF] = 8, and any v with bit 0 clear maps to 0.  The mapping-array
 * slide logic uses it (at += sctp_map_lookup_tab[val]) to count how far
 * the cumulative-ack point advances within a byte that is not fully set.
 *
 * NOTE(review): the closing "};" of this initializer falls in lines
 * elided from this extract.
 */
2183 int8_t sctp_map_lookup_tab[256] = {
2184 0, 1, 0, 2, 0, 1, 0, 3,
2185 0, 1, 0, 2, 0, 1, 0, 4,
2186 0, 1, 0, 2, 0, 1, 0, 3,
2187 0, 1, 0, 2, 0, 1, 0, 5,
2188 0, 1, 0, 2, 0, 1, 0, 3,
2189 0, 1, 0, 2, 0, 1, 0, 4,
2190 0, 1, 0, 2, 0, 1, 0, 3,
2191 0, 1, 0, 2, 0, 1, 0, 6,
2192 0, 1, 0, 2, 0, 1, 0, 3,
2193 0, 1, 0, 2, 0, 1, 0, 4,
2194 0, 1, 0, 2, 0, 1, 0, 3,
2195 0, 1, 0, 2, 0, 1, 0, 5,
2196 0, 1, 0, 2, 0, 1, 0, 3,
2197 0, 1, 0, 2, 0, 1, 0, 4,
2198 0, 1, 0, 2, 0, 1, 0, 3,
2199 0, 1, 0, 2, 0, 1, 0, 7,
2200 0, 1, 0, 2, 0, 1, 0, 3,
2201 0, 1, 0, 2, 0, 1, 0, 4,
2202 0, 1, 0, 2, 0, 1, 0, 3,
2203 0, 1, 0, 2, 0, 1, 0, 5,
2204 0, 1, 0, 2, 0, 1, 0, 3,
2205 0, 1, 0, 2, 0, 1, 0, 4,
2206 0, 1, 0, 2, 0, 1, 0, 3,
2207 0, 1, 0, 2, 0, 1, 0, 6,
2208 0, 1, 0, 2, 0, 1, 0, 3,
2209 0, 1, 0, 2, 0, 1, 0, 4,
2210 0, 1, 0, 2, 0, 1, 0, 3,
2211 0, 1, 0, 2, 0, 1, 0, 5,
2212 0, 1, 0, 2, 0, 1, 0, 3,
2213 0, 1, 0, 2, 0, 1, 0, 4,
2214 0, 1, 0, 2, 0, 1, 0, 3,
2215 0, 1, 0, 2, 0, 1, 0, 8
2220 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2223 * Now we also need to check the mapping array in a couple of ways.
2224 * 1) Did we move the cum-ack point?
2226 * When you first glance at this you might think that all entries that
2227 * make up the position of the cum-ack would be in the nr-mapping
2228 * array only.. i.e. things up to the cum-ack are always
2229 * deliverable. That's true with one exception, when it's a fragmented
2230 * message we may not deliver the data until some threshold (or all
2231 * of it) is in place. So we must OR the nr_mapping_array and
2232 * mapping_array to get a true picture of the cum-ack.
2234 struct sctp_association *asoc;
2237 int slide_from, slide_end, lgap, distance;
2238 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2243 old_cumack = asoc->cumulative_tsn;
2244 old_base = asoc->mapping_array_base_tsn;
2245 old_highest = asoc->highest_tsn_inside_map;
2247 * We could probably improve this a small bit by calculating the
2248 * offset of the current cum-ack as the starting point.
2251 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2252 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2256 /* there is a 0 bit */
2257 at += sctp_map_lookup_tab[val];
2261 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2263 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2264 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2266 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2267 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2269 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2270 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2271 sctp_print_mapping_array(asoc);
2272 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2273 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2275 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2276 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2279 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2280 highest_tsn = asoc->highest_tsn_inside_nr_map;
2282 highest_tsn = asoc->highest_tsn_inside_map;
2284 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2285 /* The complete array was completed by a single FR */
2286 /* highest becomes the cum-ack */
2294 /* clear the array */
2295 clr = ((at + 7) >> 3);
2296 if (clr > asoc->mapping_array_size) {
2297 clr = asoc->mapping_array_size;
2299 memset(asoc->mapping_array, 0, clr);
2300 memset(asoc->nr_mapping_array, 0, clr);
2302 for (i = 0; i < asoc->mapping_array_size; i++) {
2303 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2304 printf("Error Mapping array's not clean at clear\n");
2305 sctp_print_mapping_array(asoc);
2309 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2310 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2311 } else if (at >= 8) {
2312 /* we can slide the mapping array down */
2313 /* slide_from holds where we hit the first NON 0xff byte */
2316 * now calculate the ceiling of the move using our highest
2319 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2320 slide_end = (lgap >> 3);
2321 if (slide_end < slide_from) {
2322 sctp_print_mapping_array(asoc);
2324 panic("impossible slide");
2326 printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2327 lgap, slide_end, slide_from, at);
2331 if (slide_end > asoc->mapping_array_size) {
2333 panic("would overrun buffer");
2335 printf("Gak, would have overrun map end:%d slide_end:%d\n",
2336 asoc->mapping_array_size, slide_end);
2337 slide_end = asoc->mapping_array_size;
2340 distance = (slide_end - slide_from) + 1;
2341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2342 sctp_log_map(old_base, old_cumack, old_highest,
2343 SCTP_MAP_PREPARE_SLIDE);
2344 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2345 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2347 if (distance + slide_from > asoc->mapping_array_size ||
2350 * Here we do NOT slide forward the array so that
2351 * hopefully when more data comes in to fill it up
2352 * we will be able to slide it forward. Really I
2353 * don't think this should happen :-0
2356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2357 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2358 (uint32_t) asoc->mapping_array_size,
2359 SCTP_MAP_SLIDE_NONE);
2364 for (ii = 0; ii < distance; ii++) {
2365 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2366 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2369 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2370 asoc->mapping_array[ii] = 0;
2371 asoc->nr_mapping_array[ii] = 0;
2373 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2374 asoc->highest_tsn_inside_map += (slide_from << 3);
2376 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2377 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2379 asoc->mapping_array_base_tsn += (slide_from << 3);
2380 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2381 sctp_log_map(asoc->mapping_array_base_tsn,
2382 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2383 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() - decide whether to emit a SACK now or arm the
 * delayed-ack timer.
 *
 * NOTE(review): this extract is sampled -- the embedded original line
 * numbers jump (2390, 2392, 2393, ...), so braces/else-arms are missing
 * from view.  Comments below describe only what the visible lines show.
 */
2390 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2392 struct sctp_association *asoc;
2393 uint32_t highest_tsn;
/* Effective highest TSN is the max over the renegable and the
 * non-renegable (nr) mapping-array highs. */
2396 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2397 highest_tsn = asoc->highest_tsn_inside_nr_map;
2399 highest_tsn = asoc->highest_tsn_inside_map;
2403 * Now we need to see if we need to queue a sack or just start the
2404 * timer (if allowed).
/* SHUTDOWN-SENT: stop any pending delayed-ack timer and send both a
 * SHUTDOWN and an immediate SACK instead of delaying the ack. */
2406 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2408 * Ok special case, in SHUTDOWN-SENT case. here we make
2409 * sure SACK timer is off and instead send a SHUTDOWN and a
2412 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2413 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2414 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2416 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2417 sctp_send_sack(stcb);
2421 /* is there a gap now ? */
2422 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2425 * CMT DAC algorithm: increase number of packets received
2428 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Conditions under which a SACK must go out (or be considered): any of
 * the visible disjuncts below being true triggers the branch. */
2430 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2432 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2434 (stcb->asoc.numduptsns) || /* we have dup's */
2435 (is_a_gap) || /* is still a gap */
2436 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2437 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/* CMT DAC exception: with CMT+DAC on and nothing forcing an immediate
 * SACK (no explicit send_sack, no dups, delayed acks enabled, timer not
 * already pending), delay the ack instead of sending it. */
2440 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2441 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2442 (stcb->asoc.send_sack == 0) &&
2443 (stcb->asoc.numduptsns == 0) &&
2444 (stcb->asoc.delayed_ack) &&
2445 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2448 * CMT DAC algorithm: With CMT, delay acks
2449 * even in the face of
2451 * reordering. Therefore, if acks that do not
2452 * have to be sent because of the above
2453 * reasons, will be delayed. That is, acks
2454 * that would have been sent due to gap
2455 * reports will be delayed with DAC. Start
2456 * the delayed ack timer.
2458 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2459 stcb->sctp_ep, stcb, NULL);
2462 * Ok we must build a SACK since the timer
2463 * is pending, we got our first packet OR
2464 * there are gaps or duplicates.
2466 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2467 sctp_send_sack(stcb);
/* Otherwise: no SACK needed yet -- arm the delayed-ack timer if it is
 * not already running. */
2470 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2471 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2472 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() - drive delivery from the reassembly queue.
 * If a partial-delivery (PD-API) is already in progress, continue it;
 * otherwise check whether the first reassembly entry can start a new
 * (partial) delivery.
 *
 * NOTE(review): sampled extract -- original lines are missing between
 * the numbers shown; comments describe only the visible code.
 */
2479 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2481 struct sctp_tmit_chunk *chk;
2482 uint32_t tsize, pd_point;
2485 if (asoc->fragmented_delivery_inprogress) {
2486 sctp_service_reassembly(stcb, asoc);
2488 /* Can we proceed further, i.e. the PD-API is complete */
2489 if (asoc->fragmented_delivery_inprogress) {
2494 * Now is there some other chunk I can deliver from the reassembly
/* Empty reassembly queue: reset the bookkeeping counters. */
2498 chk = TAILQ_FIRST(&asoc->reasmqueue);
2500 asoc->size_on_reasm_queue = 0;
2501 asoc->cnt_on_reasm_queue = 0;
/* nxt_todel = next in-order stream sequence expected for this stream. */
2504 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2505 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2506 ((nxt_todel == chk->rec.data.stream_seq) ||
2507 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2509 * Yep the first one is here. We setup to start reception,
2510 * by backing down the TSN just in case we can't deliver.
2514 * Before we start though either all of the message should
2515 * be here or the socket buffer max or nothing on the
2516 * delivery queue and something can be delivered.
/* pd_point: threshold of queued bytes at which partial delivery may
 * begin; bounded by the socket rcv buffer when a socket exists. */
2518 if (stcb->sctp_socket) {
2519 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2520 stcb->sctp_ep->partial_delivery_point);
2522 pd_point = stcb->sctp_ep->partial_delivery_point;
2524 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Record the PD-API context (stream, ssn, ppid, flags) before
 * servicing the reassembly queue. */
2525 asoc->fragmented_delivery_inprogress = 1;
2526 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2527 asoc->str_of_pdapi = chk->rec.data.stream_number;
2528 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2529 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2530 asoc->fragment_flags = chk->rec.data.rcv_flags;
2531 sctp_service_reassembly(stcb, asoc);
2532 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() - walk all chunks in an incoming packet, handing
 * DATA chunks to sctp_process_a_data_chunk() and error-reporting or
 * skipping everything else, then service the reassembly queue and
 * kick SACK generation.
 *
 * NOTE(review): sampled extract -- the embedded original line numbers
 * jump, so error paths, braces and some declarations are not visible.
 * Comments below only describe what the visible lines establish.
 */
2540 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2541 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2542 struct sctp_nets *net, uint32_t * high_tsn)
2544 struct sctp_data_chunk *ch, chunk_buf;
2545 struct sctp_association *asoc;
2546 int num_chunks = 0; /* number of control chunks processed */
2548 int chk_length, break_flag, last_chunk;
2549 int abort_flag = 0, was_a_gap;
2551 uint32_t highest_tsn;
2554 sctp_set_rwnd(stcb, &stcb->asoc);
2557 SCTP_TCB_LOCK_ASSERT(stcb);
/* Determine whether a gap existed BEFORE processing this packet, so the
 * SACK decision later can compare against the post-processing state. */
2559 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2560 highest_tsn = asoc->highest_tsn_inside_nr_map;
2562 highest_tsn = asoc->highest_tsn_inside_map;
2564 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2566 * setup where we got the last DATA packet from for any SACK that
2567 * may need to go out. Don't bump the net. This is done ONLY when a
2568 * chunk is assigned.
2570 asoc->last_data_chunk_from = net;
2573 * Now before we proceed we must figure out if this is a wasted
2574 * cluster... i.e. it is a small packet sent in and yet the driver
2575 * underneath allocated a full cluster for it. If so we must copy it
2576 * to a smaller mbuf and free up the cluster mbuf. This will help
2577 * with cluster starvation. Note for __Panda__ we don't do this
2578 * since it has clusters all the way down to 64 bytes.
2580 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2581 /* we only handle mbufs that are singletons.. not chains */
2582 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2584 /* ok lets see if we can copy the data up */
2587 /* get the pointers and copy */
2588 to = mtod(m, caddr_t *);
2589 from = mtod((*mm), caddr_t *);
2590 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2591 /* copy the length and free up the old */
2592 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2594 /* success, back copy */
2597 /* We are in trouble in the mbuf world .. yikes */
2601 /* get pointer to the first chunk header */
2602 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2603 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2608 * process all DATA chunks...
2610 *high_tsn = asoc->cumulative_tsn;
2612 asoc->data_pkts_seen++;
/* Main chunk walk: iterate chunk headers until end-of-packet or an
 * abort/stop condition is raised. */
2613 while (stop_proc == 0) {
2614 /* validate chunk length */
2615 chk_length = ntohs(ch->ch.chunk_length);
2616 if (length - *offset < chk_length) {
2617 /* all done, mutilated chunk */
2621 if (ch->ch.chunk_type == SCTP_DATA) {
/* A DATA chunk must carry at least one byte of payload past the
 * data-chunk header; otherwise build a PROTOCOL_VIOLATION error
 * cause and abort the association. */
2622 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2624 * Need to send an abort since we had a
2625 * invalid data chunk.
2627 struct mbuf *op_err;
2629 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2630 0, M_DONTWAIT, 1, MT_DATA);
2633 struct sctp_paramhdr *ph;
2636 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2637 (2 * sizeof(uint32_t));
2638 ph = mtod(op_err, struct sctp_paramhdr *);
2640 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2641 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2642 ippp = (uint32_t *) (ph + 1);
2643 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2645 *ippp = asoc->cumulative_tsn;
2648 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2649 sctp_abort_association(inp, stcb, m, iphlen, sh,
2650 op_err, 0, net->port);
2653 #ifdef SCTP_AUDITING_ENABLED
2654 sctp_audit_log(0xB1, 0);
/* last_chunk when the (padded) chunk length reaches end of packet. */
2656 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2661 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2662 chk_length, net, high_tsn, &abort_flag, &break_flag,
2671 * Set because of out of rwnd space and no
2672 * drop rep space left.
2678 /* not a data chunk in the data region */
2679 switch (ch->ch.chunk_type) {
2680 case SCTP_INITIATION:
2681 case SCTP_INITIATION_ACK:
2682 case SCTP_SELECTIVE_ACK:
2683 case SCTP_NR_SELECTIVE_ACK: /* EY */
2684 case SCTP_HEARTBEAT_REQUEST:
2685 case SCTP_HEARTBEAT_ACK:
2686 case SCTP_ABORT_ASSOCIATION:
2688 case SCTP_SHUTDOWN_ACK:
2689 case SCTP_OPERATION_ERROR:
2690 case SCTP_COOKIE_ECHO:
2691 case SCTP_COOKIE_ACK:
2694 case SCTP_SHUTDOWN_COMPLETE:
2695 case SCTP_AUTHENTICATION:
2696 case SCTP_ASCONF_ACK:
2697 case SCTP_PACKET_DROPPED:
2698 case SCTP_STREAM_RESET:
2699 case SCTP_FORWARD_CUM_TSN:
2702 * Now, what do we do with KNOWN chunks that
2703 * are NOT in the right place?
2705 * For now, I do nothing but ignore them. We
2706 * may later want to add sysctl stuff to
2707 * switch out and do either an ABORT() or
2708 * possibly process them.
2710 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2711 struct mbuf *op_err;
2713 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2714 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2719 /* unknown chunk type, use bit rules */
/* High bit 0x40 set => report the unrecognized chunk to the peer
 * via an operational error (RFC 4960 chunk-type bit rules). */
2720 if (ch->ch.chunk_type & 0x40) {
2721 /* Add a error report to the queue */
2723 struct sctp_paramhdr *phd;
2725 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2727 phd = mtod(merr, struct sctp_paramhdr *);
2729 * We cheat and use param
2730 * type since we did not
2731 * bother to define a error
2732 * cause struct. They are
2733 * the same basic format
2734 * with different names.
2737 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2739 htons(chk_length + sizeof(*phd));
2740 SCTP_BUF_LEN(merr) = sizeof(*phd);
2741 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2742 SCTP_SIZE32(chk_length),
2744 if (SCTP_BUF_NEXT(merr)) {
2745 sctp_queue_op_err(stcb, merr);
/* High bit 0x80 clear => stop processing the rest of the packet. */
2751 if ((ch->ch.chunk_type & 0x80) == 0) {
2752 /* discard the rest of this packet */
2754 } /* else skip this bad chunk and
2757 }; /* switch of chunk type */
/* Advance to the next (32-bit padded) chunk, or stop at end of data. */
2759 *offset += SCTP_SIZE32(chk_length);
2760 if ((*offset >= length) || stop_proc) {
2761 /* no more data left in the mbuf chain */
2765 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2766 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2776 * we need to report rwnd overrun drops.
2778 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2782 * Did we get data, if so update the time for auto-close and
2783 * give peer credit for being alive.
2785 SCTP_STAT_INCR(sctps_recvpktwithdata);
2786 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2787 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2788 stcb->asoc.overall_error_count,
2790 SCTP_FROM_SCTP_INDATA,
/* Receiving valid data resets the association error count and stamps
 * the last-received time (used by auto-close). */
2793 stcb->asoc.overall_error_count = 0;
2794 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2796 /* now service all of the reassm queue if needed */
2797 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2798 sctp_service_queues(stcb, asoc);
2800 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2801 /* Assure that we ack right away */
2802 stcb->asoc.send_sack = 1;
2804 /* Start a sack timer or QUEUE a SACK for sending */
2805 sctp_sack_check(stcb, was_a_gap, &abort_flag);
/*
 * sctp_process_segment_range() - mark sent-queue chunks covered by one
 * SACK gap-ack block [last_tsn+frag_strt, last_tsn+frag_end] as acked,
 * updating flight size, RTO samples, CMT pseudo-cumack trackers and
 * the biggest-newly-acked bookkeeping.
 *
 * Returns wake_him (per the visible comment, only meaningful for
 * nr-sack processing).
 *
 * NOTE(review): sampled extract -- original lines are missing between
 * the embedded numbers; comments describe only the visible code.
 */
2813 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2814 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2816 uint32_t * biggest_newly_acked_tsn,
2817 uint32_t * this_sack_lowest_newack,
2820 struct sctp_tmit_chunk *tp1;
2821 unsigned int theTSN;
2822 int j, wake_him = 0, circled = 0;
2824 /* Recover the tp1 we last saw */
2827 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Walk every TSN named by this gap block; theTSN is the absolute TSN. */
2829 for (j = frag_strt; j <= frag_end; j++) {
2830 theTSN = j + last_tsn;
2832 if (tp1->rec.data.doing_fast_retransmit)
2836 * CMT: CUCv2 algorithm. For each TSN being
2837 * processed from the sent queue, track the
2838 * next expected pseudo-cumack, or
2839 * rtx_pseudo_cumack, if required. Separate
2840 * cumack trackers for first transmissions,
2841 * and retransmissions.
2843 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2844 (tp1->snd_count == 1)) {
2845 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2846 tp1->whoTo->find_pseudo_cumack = 0;
2848 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2849 (tp1->snd_count > 1)) {
2850 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2851 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the sent-queue entry matching this gap-acked TSN. */
2853 if (tp1->rec.data.TSN_seq == theTSN) {
2854 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2856 * must be held until
2859 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2861 * If it is less than RESEND, it is
2862 * now no-longer in flight.
2863 * Higher values may already be set
2864 * via previous Gap Ack Blocks...
2865 * i.e. ACKED or RESEND.
2867 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2868 *biggest_newly_acked_tsn)) {
2869 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2872 * CMT: SFR algo (and HTNA) - set
2873 * saw_newack to 1 for dest being
2874 * newly acked. update
2875 * this_sack_highest_newack if
2878 if (tp1->rec.data.chunk_was_revoked == 0)
2879 tp1->whoTo->saw_newack = 1;
2881 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2882 tp1->whoTo->this_sack_highest_newack)) {
2883 tp1->whoTo->this_sack_highest_newack =
2884 tp1->rec.data.TSN_seq;
2887 * CMT DAC algo: also update
2888 * this_sack_lowest_newack
2890 if (*this_sack_lowest_newack == 0) {
2891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2892 sctp_log_sack(*this_sack_lowest_newack,
2894 tp1->rec.data.TSN_seq,
2897 SCTP_LOG_TSN_ACKED);
2899 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2902 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2903 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2904 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2905 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2906 * Separate pseudo_cumack trackers for first transmissions and
2909 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2910 if (tp1->rec.data.chunk_was_revoked == 0) {
2911 tp1->whoTo->new_pseudo_cumack = 1;
2913 tp1->whoTo->find_pseudo_cumack = 1;
2915 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2916 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2918 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2919 if (tp1->rec.data.chunk_was_revoked == 0) {
2920 tp1->whoTo->new_pseudo_cumack = 1;
2922 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2925 sctp_log_sack(*biggest_newly_acked_tsn,
2927 tp1->rec.data.TSN_seq,
2930 SCTP_LOG_TSN_ACKED);
2932 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2933 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2934 tp1->whoTo->flight_size,
2936 (uintptr_t) tp1->whoTo,
2937 tp1->rec.data.TSN_seq);
/* Newly acked: remove from flight and credit the dest's net_ack
 * (net_ack2 only for chunks never retransmitted). */
2939 sctp_flight_size_decrease(tp1);
2940 sctp_total_flight_decrease(stcb, tp1);
2942 tp1->whoTo->net_ack += tp1->send_size;
2943 if (tp1->snd_count < 2) {
2945 * True non-retransmitted chunk
2947 tp1->whoTo->net_ack2 += tp1->send_size;
2954 sctp_calculate_rto(stcb,
2957 &tp1->sent_rcv_time,
2958 sctp_align_safe_nocopy);
2963 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2964 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2965 stcb->asoc.this_sack_highest_gap)) {
2966 stcb->asoc.this_sack_highest_gap =
2967 tp1->rec.data.TSN_seq;
2969 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2970 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2971 #ifdef SCTP_AUDITING_ENABLED
2972 sctp_audit_log(0xB2,
2973 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2978 * All chunks NOT UNSENT fall through here and are marked
2979 * (leave PR-SCTP ones that are to skip alone though)
2981 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2982 tp1->sent = SCTP_DATAGRAM_MARKED;
2984 if (tp1->rec.data.chunk_was_revoked) {
2985 /* deflate the cwnd */
2986 tp1->whoTo->cwnd -= tp1->book_size;
2987 tp1->rec.data.chunk_was_revoked = 0;
2989 /* NR Sack code here */
2996 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2997 sctp_m_freem(tp1->data);
3004 } /* if (tp1->TSN_seq == theTSN) */
/* Past the target TSN: stop scanning for this j.  Otherwise advance;
 * wrap to the queue head once (circled) for out-of-order fragments. */
3005 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3008 tp1 = TAILQ_NEXT(tp1, sctp_next);
3009 if ((tp1 == NULL) && (circled == 0)) {
3011 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3013 } /* end while (tp1) */
3016 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3018 /* In case the fragments were not in order we must reset */
3019 } /* end for (j = fragStart */
3021 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() - iterate the gap-ack blocks of a SACK/NR-SACK
 * chunk (num_seg ordinary + num_nr_seg nr blocks), pulling each block
 * from the mbuf chain and feeding it to sctp_process_segment_range().
 * Returns chunk_freed (whether any chunk was freed during processing).
 *
 * NOTE(review): sampled extract -- some original lines are missing;
 * comments describe only the visible code.
 */
3026 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3027 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3028 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3029 int num_seg, int num_nr_seg, int *ecn_seg_sums)
3031 struct sctp_gap_ack_block *frag, block;
3032 struct sctp_tmit_chunk *tp1;
3037 uint16_t frag_strt, frag_end, prev_frag_end;
3039 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3043 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3046 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Copy the next gap-ack block out of the mbuf chain; bail out if the
 * chunk is shorter than advertised. */
3048 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3049 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3050 *offset += sizeof(block);
3052 return (chunk_freed);
3054 frag_strt = ntohs(frag->start);
3055 frag_end = ntohs(frag->end);
/* Sanity-check the block: start must not exceed end, and blocks are
 * expected in increasing order (restart the queue scan if not). */
3057 if (frag_strt > frag_end) {
3058 /* This gap report is malformed, skip it. */
3061 if (frag_strt <= prev_frag_end) {
3062 /* This gap report is not in order, so restart. */
3063 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3065 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3066 *biggest_tsn_acked = last_tsn + frag_end;
3073 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3074 non_revocable, &num_frs, biggest_newly_acked_tsn,
3075 this_sack_lowest_newack, ecn_seg_sums)) {
3078 prev_frag_end = frag_end;
3080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3082 sctp_log_fr(*biggest_tsn_acked,
3083 *biggest_newly_acked_tsn,
3084 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3086 return (chunk_freed);
/*
 * sctp_check_for_revoked() - scan the sent queue for chunks above the
 * cumulative ack that were previously gap-acked (ACKED) but are no
 * longer covered by this SACK: such chunks have been "revoked" by the
 * receiver and must be put back in flight (SENT), with flight size and
 * cwnd re-inflated.  Chunks re-acked this round (MARKED) are promoted
 * to ACKED.
 *
 * NOTE(review): sampled extract -- some original lines are missing;
 * comments describe only the visible code.
 */
3090 sctp_check_for_revoked(struct sctp_tcb *stcb,
3091 struct sctp_association *asoc, uint32_t cumack,
3092 uint32_t biggest_tsn_acked)
3094 struct sctp_tmit_chunk *tp1;
3095 int tot_revoked = 0;
3097 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
/* Only TSNs above the cumack can be in the (revocable) gap-ack zone. */
3098 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3100 * ok this guy is either ACK or MARKED. If it is
3101 * ACKED it has been previously acked but not this
3102 * time i.e. revoked. If it is MARKED it was ACK'ed
3105 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3108 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3109 /* it has been revoked */
3110 tp1->sent = SCTP_DATAGRAM_SENT;
3111 tp1->rec.data.chunk_was_revoked = 1;
3113 * We must add this stuff back in to assure
3114 * timers and such get started.
3116 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3117 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3118 tp1->whoTo->flight_size,
3120 (uintptr_t) tp1->whoTo,
3121 tp1->rec.data.TSN_seq);
3123 sctp_flight_size_increase(tp1);
3124 sctp_total_flight_increase(stcb, tp1);
3126 * We inflate the cwnd to compensate for our
3127 * artificial inflation of the flight_size.
3129 tp1->whoTo->cwnd += tp1->book_size;
3131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3132 sctp_log_sack(asoc->last_acked_seq,
3134 tp1->rec.data.TSN_seq,
3137 SCTP_LOG_TSN_REVOKED);
3139 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3140 /* it has been re-acked in this SACK */
3141 tp1->sent = SCTP_DATAGRAM_ACKED;
/* UNSENT entries terminate the useful part of the scan (sent queue is
 * ordered; nothing past here can have been acked). */
3144 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3151 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3152 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3154 struct sctp_tmit_chunk *tp1;
3155 int strike_flag = 0;
3157 int tot_retrans = 0;
3158 uint32_t sending_seq;
3159 struct sctp_nets *net;
3160 int num_dests_sacked = 0;
3163 * select the sending_seq, this is either the next thing ready to be
3164 * sent but not transmitted, OR, the next seq we assign.
3166 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3168 sending_seq = asoc->sending_seq;
3170 sending_seq = tp1->rec.data.TSN_seq;
3173 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3174 if ((asoc->sctp_cmt_on_off > 0) &&
3175 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3176 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3177 if (net->saw_newack)
3181 if (stcb->asoc.peer_supports_prsctp) {
3182 (void)SCTP_GETTIME_TIMEVAL(&now);
3184 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3186 if (tp1->no_fr_allowed) {
3187 /* this one had a timeout or something */
3190 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3191 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3192 sctp_log_fr(biggest_tsn_newly_acked,
3193 tp1->rec.data.TSN_seq,
3195 SCTP_FR_LOG_CHECK_STRIKE);
3197 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3198 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3202 if (stcb->asoc.peer_supports_prsctp) {
3203 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3204 /* Is it expired? */
3205 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3206 /* Yes so drop it */
3207 if (tp1->data != NULL) {
3208 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3209 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3210 SCTP_SO_NOT_LOCKED);
3216 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3217 /* we are beyond the tsn in the sack */
3220 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3221 /* either a RESEND, ACKED, or MARKED */
3223 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3224 /* Continue strikin FWD-TSN chunks */
3225 tp1->rec.data.fwd_tsn_cnt++;
3230 * CMT : SFR algo (covers part of DAC and HTNA as well)
3232 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3234 * No new acks were receieved for data sent to this
3235 * dest. Therefore, according to the SFR algo for
3236 * CMT, no data sent to this dest can be marked for
3237 * FR using this SACK.
3240 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3241 tp1->whoTo->this_sack_highest_newack)) {
3243 * CMT: New acks were receieved for data sent to
3244 * this dest. But no new acks were seen for data
3245 * sent after tp1. Therefore, according to the SFR
3246 * algo for CMT, tp1 cannot be marked for FR using
3247 * this SACK. This step covers part of the DAC algo
3248 * and the HTNA algo as well.
3253 * Here we check to see if we were have already done a FR
3254 * and if so we see if the biggest TSN we saw in the sack is
3255 * smaller than the recovery point. If so we don't strike
3256 * the tsn... otherwise we CAN strike the TSN.
3259 * @@@ JRI: Check for CMT if (accum_moved &&
3260 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3263 if (accum_moved && asoc->fast_retran_loss_recovery) {
3265 * Strike the TSN if in fast-recovery and cum-ack
3268 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3269 sctp_log_fr(biggest_tsn_newly_acked,
3270 tp1->rec.data.TSN_seq,
3272 SCTP_FR_LOG_STRIKE_CHUNK);
3274 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3277 if ((asoc->sctp_cmt_on_off > 0) &&
3278 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3280 * CMT DAC algorithm: If SACK flag is set to
3281 * 0, then lowest_newack test will not pass
3282 * because it would have been set to the
3283 * cumack earlier. If not already to be
3284 * rtx'd, If not a mixed sack and if tp1 is
3285 * not between two sacked TSNs, then mark by
3286 * one more. NOTE that we are marking by one
3287 * additional time since the SACK DAC flag
3288 * indicates that two packets have been
3289 * received after this missing TSN.
3291 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3292 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3294 sctp_log_fr(16 + num_dests_sacked,
3295 tp1->rec.data.TSN_seq,
3297 SCTP_FR_LOG_STRIKE_CHUNK);
3302 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3303 (asoc->sctp_cmt_on_off == 0)) {
3305 * For those that have done a FR we must take
3306 * special consideration if we strike. I.e the
3307 * biggest_newly_acked must be higher than the
3308 * sending_seq at the time we did the FR.
3311 #ifdef SCTP_FR_TO_ALTERNATE
3313 * If FR's go to new networks, then we must only do
3314 * this for singly homed asoc's. However if the FR's
3315 * go to the same network (Armando's work) then its
3316 * ok to FR multiple times.
3324 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3325 tp1->rec.data.fast_retran_tsn)) {
3327 * Strike the TSN, since this ack is
3328 * beyond where things were when we
3331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3332 sctp_log_fr(biggest_tsn_newly_acked,
3333 tp1->rec.data.TSN_seq,
3335 SCTP_FR_LOG_STRIKE_CHUNK);
3337 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3341 if ((asoc->sctp_cmt_on_off > 0) &&
3342 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3344 * CMT DAC algorithm: If
3345 * SACK flag is set to 0,
3346 * then lowest_newack test
3347 * will not pass because it
3348 * would have been set to
3349 * the cumack earlier. If
3350 * not already to be rtx'd,
3351 * If not a mixed sack and
3352 * if tp1 is not between two
3353 * sacked TSNs, then mark by
3354 * one more. NOTE that we
3355 * are marking by one
3356 * additional time since the
3357 * SACK DAC flag indicates
3358 * that two packets have
3359 * been received after this
3362 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3363 (num_dests_sacked == 1) &&
3364 SCTP_TSN_GT(this_sack_lowest_newack,
3365 tp1->rec.data.TSN_seq)) {
3366 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3367 sctp_log_fr(32 + num_dests_sacked,
3368 tp1->rec.data.TSN_seq,
3370 SCTP_FR_LOG_STRIKE_CHUNK);
3372 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3380 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3383 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3384 biggest_tsn_newly_acked)) {
3386 * We don't strike these: This is the HTNA
3387 * algorithm i.e. we don't strike If our TSN is
3388 * larger than the Highest TSN Newly Acked.
3392 /* Strike the TSN */
3393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3394 sctp_log_fr(biggest_tsn_newly_acked,
3395 tp1->rec.data.TSN_seq,
3397 SCTP_FR_LOG_STRIKE_CHUNK);
3399 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3402 if ((asoc->sctp_cmt_on_off > 0) &&
3403 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3405 * CMT DAC algorithm: If SACK flag is set to
3406 * 0, then lowest_newack test will not pass
3407 * because it would have been set to the
3408 * cumack earlier. If not already to be
3409 * rtx'd, If not a mixed sack and if tp1 is
3410 * not between two sacked TSNs, then mark by
3411 * one more. NOTE that we are marking by one
3412 * additional time since the SACK DAC flag
3413 * indicates that two packets have been
3414 * received after this missing TSN.
3416 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3417 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3418 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3419 sctp_log_fr(48 + num_dests_sacked,
3420 tp1->rec.data.TSN_seq,
3422 SCTP_FR_LOG_STRIKE_CHUNK);
3428 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3429 struct sctp_nets *alt;
3431 /* fix counts and things */
3432 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3433 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3434 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3436 (uintptr_t) tp1->whoTo,
3437 tp1->rec.data.TSN_seq);
3440 tp1->whoTo->net_ack++;
3441 sctp_flight_size_decrease(tp1);
3443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3444 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3445 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3447 /* add back to the rwnd */
3448 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3450 /* remove from the total flight */
3451 sctp_total_flight_decrease(stcb, tp1);
3453 if ((stcb->asoc.peer_supports_prsctp) &&
3454 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3456 * Has it been retransmitted tv_sec times? -
3457 * we store the retran count there.
3459 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3460 /* Yes, so drop it */
3461 if (tp1->data != NULL) {
3462 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3463 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3464 SCTP_SO_NOT_LOCKED);
3466 /* Make sure to flag we had a FR */
3467 tp1->whoTo->net_ack++;
3471 /* printf("OK, we are now ready to FR this guy\n"); */
3472 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3473 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3477 /* This is a subsequent FR */
3478 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3480 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3481 if (asoc->sctp_cmt_on_off > 0) {
3483 * CMT: Using RTX_SSTHRESH policy for CMT.
3484 * If CMT is being used, then pick dest with
3485 * largest ssthresh for any retransmission.
3487 tp1->no_fr_allowed = 1;
3489 /* sa_ignore NO_NULL_CHK */
3490 if (asoc->sctp_cmt_pf > 0) {
3492 * JRS 5/18/07 - If CMT PF is on,
3493 * use the PF version of
3496 alt = sctp_find_alternate_net(stcb, alt, 2);
3499 * JRS 5/18/07 - If only CMT is on,
3500 * use the CMT version of
3503 /* sa_ignore NO_NULL_CHK */
3504 alt = sctp_find_alternate_net(stcb, alt, 1);
3510 * CUCv2: If a different dest is picked for
3511 * the retransmission, then new
3512 * (rtx-)pseudo_cumack needs to be tracked
3513 * for orig dest. Let CUCv2 track new (rtx-)
3514 * pseudo-cumack always.
3517 tp1->whoTo->find_pseudo_cumack = 1;
3518 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3520 } else {/* CMT is OFF */
3522 #ifdef SCTP_FR_TO_ALTERNATE
3523 /* Can we find an alternate? */
3524 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3527 * default behavior is to NOT retransmit
3528 * FR's to an alternate. Armando Caro's
3529 * paper details why.
3535 tp1->rec.data.doing_fast_retransmit = 1;
3537 /* mark the sending seq for possible subsequent FR's */
3539 * printf("Marking TSN for FR new value %x\n",
3540 * (uint32_t)tpi->rec.data.TSN_seq);
3542 if (TAILQ_EMPTY(&asoc->send_queue)) {
3544 * If the queue of send is empty then its
3545 * the next sequence number that will be
3546 * assigned so we subtract one from this to
3547 * get the one we last sent.
3549 tp1->rec.data.fast_retran_tsn = sending_seq;
3552 * If there are chunks on the send queue
3553 * (unsent data that has made it from the
3554 * stream queues but not out the door, we
3555 * take the first one (which will have the
3556 * lowest TSN) and subtract one to get the
3559 struct sctp_tmit_chunk *ttt;
3561 ttt = TAILQ_FIRST(&asoc->send_queue);
3562 tp1->rec.data.fast_retran_tsn =
3563 ttt->rec.data.TSN_seq;
3568 * this guy had a RTO calculation pending on
3573 if (alt != tp1->whoTo) {
3574 /* yes, there is an alternate. */
3575 sctp_free_remote_addr(tp1->whoTo);
3576 /* sa_ignore FREED_MEMORY */
3578 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP: walk the sent queue from the front and try to move
 * asoc->advanced_peer_ack_point forward over chunks marked
 * SCTP_FORWARD_TSN_SKIP (and resend-marked PR chunks whose lifetime
 * has expired, which get released and skipped here too).  Returns the
 * chunk (a_adv) at the new advanced ack point so the caller can decide
 * whether a FORWARD-TSN must be sent.
 *
 * NOTE(review): the original numbering embedded in these lines jumps
 * (3586 -> 3588, 3605 -> 3608, ...); several statements/braces from the
 * original file were dropped in extraction.  Code left byte-identical.
 */
3584 struct sctp_tmit_chunk *
3585 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3586 struct sctp_association *asoc)
3588 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to advance unless the peer negotiated PR-SCTP. */
3592 if (asoc->peer_supports_prsctp == 0) {
/* Safe iteration: the loop may release tp1 via sctp_release_pr_sctp_chunk(). */
3595 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3596 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3597 tp1->sent != SCTP_DATAGRAM_RESEND) {
3598 /* no chance to advance, out of here */
3601 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3602 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3603 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3604 asoc->advanced_peer_ack_point,
3605 tp1->rec.data.TSN_seq, 0, 0);
3608 if (!PR_SCTP_ENABLED(tp1->flags)) {
3610 * We can't fwd-tsn past any that are reliable aka
3611 * retransmitted until the asoc fails.
/* Snapshot current time for the lifetime (TTL) comparison below. */
3616 (void)SCTP_GETTIME_TIMEVAL(&now);
3620 * now we got a chunk which is marked for another
3621 * retransmission to a PR-stream but has run out its chances
3622 * already maybe OR has been marked to skip now. Can we skip
3623 * it if its a resend?
3625 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3626 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3628 * Now is this one marked for resend and its time is
/* timevalcmp(>): chunk's drop deadline has passed. */
3631 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3632 /* Yes so drop it */
3634 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3635 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3636 SCTP_SO_NOT_LOCKED);
3640 * No, we are done when hit one for resend
3641 * whos time as not expired.
3647 * Ok now if this chunk is marked to drop it we can clean up
3648 * the chunk, advance our peer ack point and we can check
3651 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3652 /* advance PeerAckPoint goes forward */
3653 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3654 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3656 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3657 /* No update but we do save the chk */
3662 * If it is still in RESEND we can advance no
/*
 * Debug audit of flight-size bookkeeping: count sent-queue chunks in
 * each send-state bucket (inflight / resend / in-between / acked /
 * above) and complain (panic or SCTP_PRINTF, per INVARIANTS) when
 * chunks still appear in flight though the caller believed flight was
 * drained.  Return value semantics not visible here (tail lines were
 * dropped in extraction) — presumably nonzero means "audit failed";
 * confirm against the full file.
 */
3672 sctp_fs_audit(struct sctp_association *asoc)
3674 struct sctp_tmit_chunk *chk;
3675 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3676 int entry_flight, entry_cnt, ret;
/* Remember the counters at entry so they can be reported on failure. */
3678 entry_flight = asoc->total_flight;
3679 entry_cnt = asoc->total_flight_count;
/* If every queued chunk is PR-SCTP there is nothing meaningful to audit. */
3682 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Classify every chunk on the sent queue by its ->sent state. */
3685 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3686 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3687 printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3688 chk->rec.data.TSN_seq,
3693 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3695 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3697 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Any inflight/in-between chunk here means the accounting is wrong. */
3704 if ((inflight > 0) || (inbetween > 0)) {
3706 panic("Flight size-express incorrect? \n");
3708 printf("asoc->total_flight:%d cnt:%d\n",
3709 entry_flight, entry_cnt);
3711 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3712 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag and, unless it was already acked/skipped
 * (sent >= SCTP_DATAGRAM_ACKED) or its data is gone, pull it out of
 * flight accounting and mark it SCTP_DATAGRAM_RESEND so it is queued
 * for retransmission now that the peer's window has reopened.
 */
3721 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3722 struct sctp_association *asoc,
3723 struct sctp_nets *net,
3724 struct sctp_tmit_chunk *tp1)
3726 tp1->window_probe = 0;
3727 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3728 /* TSN's skipped we do NOT move back. */
3729 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3730 tp1->whoTo->flight_size,
3732 (uintptr_t) tp1->whoTo,
3733 tp1->rec.data.TSN_seq);
3736 /* First setup this by shrinking flight */
3737 sctp_flight_size_decrease(tp1);
3738 sctp_total_flight_decrease(stcb, tp1);
3739 /* Now mark for resend */
3740 tp1->sent = SCTP_DATAGRAM_RESEND;
3741 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3743 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3744 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3745 tp1->whoTo->flight_size,
3747 (uintptr_t) tp1->whoTo,
3748 tp1->rec.data.TSN_seq);
/*
 * Fast-path handler for a SACK that carries only a new cumulative ack
 * (no gap-ack blocks): free every sent-queue chunk at or below cumack,
 * update flight sizes, the peer rwnd, per-net CMT/CUCv2 state, congestion
 * control, T3 timers, the shutdown state machine, and PR-SCTP's
 * advanced peer ack point.  Sets *abort_now if the SACK proves the peer
 * is hopelessly broken (acking TSNs we never sent) and the association
 * is aborted.  Must be called with the TCB lock held (asserted below).
 *
 * NOTE(review): the embedded original line numbers jump throughout;
 * lines (braces, returns, `asoc = &stcb->asoc;`-style setup) were lost
 * in extraction.  Code left byte-identical; comments only added.
 */
3753 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3754 uint32_t rwnd, int *abort_now)
3756 struct sctp_nets *net;
3757 struct sctp_association *asoc;
3758 struct sctp_tmit_chunk *tp1, *tp2;
3760 int win_probe_recovery = 0;
3761 int win_probe_recovered = 0;
3762 int j, done_once = 0;
3764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3765 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3766 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
/* Caller must hold the TCB lock for all of the queue surgery below. */
3768 SCTP_TCB_LOCK_ASSERT(stcb);
3769 #ifdef SCTP_ASOCLOG_OF_TSNS
3770 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3771 stcb->asoc.cumack_log_at++;
3772 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3773 stcb->asoc.cumack_log_at = 0;
3777 old_rwnd = asoc->peers_rwnd;
/* Old SACK (behind our last acked point): nothing to process. */
3778 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3781 } else if (asoc->last_acked_seq == cumack) {
3782 /* Window update sack */
/* Recompute peer rwnd = advertised rwnd minus what we still have in flight. */
3783 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3784 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3785 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3786 /* SWS sender side engages */
3787 asoc->peers_rwnd = 0;
3789 if (asoc->peers_rwnd > old_rwnd) {
3794 /* First setup for CC stuff */
3795 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3796 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3797 /* Drag along the window_tsn for cwr's */
3798 net->cwr_window_tsn = cumack;
3800 net->prev_cwnd = net->cwnd;
3805 * CMT: Reset CUC and Fast recovery algo variables before
3808 net->new_pseudo_cumack = 0;
3809 net->will_exit_fast_recovery = 0;
/* Sanity: reject a SACK that acks past everything we have ever sent. */
3811 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3814 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3815 tp1 = TAILQ_LAST(&asoc->sent_queue,
3816 sctpchunk_listhead);
3817 send_s = tp1->rec.data.TSN_seq + 1;
3819 send_s = asoc->sending_seq;
3821 if (SCTP_TSN_GE(cumack, send_s)) {
3827 panic("Impossible sack 1");
/* Build a PROTOCOL_VIOLATION error cause and abort the association. */
3832 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3833 0, M_DONTWAIT, 1, MT_DATA);
3835 struct sctp_paramhdr *ph;
3838 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3840 ph = mtod(oper, struct sctp_paramhdr *);
3841 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3842 ph->param_length = htons(SCTP_BUF_LEN(oper));
3843 ippp = (uint32_t *) (ph + 1);
3844 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3846 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3847 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3852 asoc->this_sack_highest_gap = cumack;
3853 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3854 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3855 stcb->asoc.overall_error_count,
3857 SCTP_FROM_SCTP_INDATA,
/* Any valid SACK clears the association error counter. */
3860 stcb->asoc.overall_error_count = 0;
3861 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3862 /* process the new consecutive TSN first */
3863 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3864 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3865 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3866 printf("Warning, an unsent is now acked?\n");
3868 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3870 * If it is less than ACKED, it is
3871 * now no-longer in flight. Higher
3872 * values may occur during marking
3874 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3875 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3876 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3877 tp1->whoTo->flight_size,
3879 (uintptr_t) tp1->whoTo,
3880 tp1->rec.data.TSN_seq);
3882 sctp_flight_size_decrease(tp1);
3883 /* sa_ignore NO_NULL_CHK */
3884 sctp_total_flight_decrease(stcb, tp1);
/* net_ack feeds the CC module; net_ack2 counts unambiguous (never-retransmitted) bytes. */
3886 tp1->whoTo->net_ack += tp1->send_size;
3887 if (tp1->snd_count < 2) {
3889 * True non-retransmited
3892 tp1->whoTo->net_ack2 +=
3895 /* update RTO too? */
/* RTT sample only valid for chunks sent exactly once (Karn's rule). */
3902 sctp_calculate_rto(stcb,
3904 &tp1->sent_rcv_time,
3905 sctp_align_safe_nocopy);
3910 * CMT: CUCv2 algorithm. From the
3911 * cumack'd TSNs, for each TSN being
3912 * acked for the first time, set the
3913 * following variables for the
3914 * corresp destination.
3915 * new_pseudo_cumack will trigger a
3917 * find_(rtx_)pseudo_cumack will
3918 * trigger search for the next
3919 * expected (rtx-)pseudo-cumack.
3921 tp1->whoTo->new_pseudo_cumack = 1;
3922 tp1->whoTo->find_pseudo_cumack = 1;
3923 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3925 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3926 /* sa_ignore NO_NULL_CHK */
3927 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3930 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3931 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3933 if (tp1->rec.data.chunk_was_revoked) {
3934 /* deflate the cwnd */
3935 tp1->whoTo->cwnd -= tp1->book_size;
3936 tp1->rec.data.chunk_was_revoked = 0;
/* Chunk is fully acked: unlink, release its mbuf data and accounting. */
3938 tp1->sent = SCTP_DATAGRAM_ACKED;
3939 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3941 /* sa_ignore NO_NULL_CHK */
3942 sctp_free_bufspace(stcb, asoc, tp1, 1);
3943 sctp_m_freem(tp1->data);
3946 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3947 sctp_log_sack(asoc->last_acked_seq,
3949 tp1->rec.data.TSN_seq,
3952 SCTP_LOG_FREE_SENT);
3954 asoc->sent_queue_cnt--;
3955 sctp_free_a_chunk(stcb, tp1);
3962 /* sa_ignore NO_NULL_CHK */
/* Wake any writer blocked on the send socket buffer. */
3963 if (stcb->sctp_socket) {
3964 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3968 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3969 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3970 /* sa_ignore NO_NULL_CHK */
3971 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
3973 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock order: drop TCB lock, take socket lock, retake TCB lock;
 * hold a refcount so the assoc cannot vanish while unlocked. */
3974 so = SCTP_INP_SO(stcb->sctp_ep);
3975 atomic_add_int(&stcb->asoc.refcnt, 1);
3976 SCTP_TCB_UNLOCK(stcb);
3977 SCTP_SOCKET_LOCK(so, 1);
3978 SCTP_TCB_LOCK(stcb);
3979 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3980 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3981 /* assoc was freed while we were unlocked */
3982 SCTP_SOCKET_UNLOCK(so, 1);
3986 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3987 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3988 SCTP_SOCKET_UNLOCK(so, 1);
3991 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3992 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
3996 /* JRS - Use the congestion control given in the CC module */
3997 if (asoc->last_acked_seq != cumack)
3998 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4000 asoc->last_acked_seq = cumack;
4002 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4003 /* nothing left in-flight */
4004 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4005 net->flight_size = 0;
4006 net->partial_bytes_acked = 0;
4008 asoc->total_flight = 0;
4009 asoc->total_flight_count = 0;
/* Refresh peer rwnd against the (possibly reduced) remaining flight. */
4012 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4013 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4014 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4015 /* SWS sender side engages */
4016 asoc->peers_rwnd = 0;
4018 if (asoc->peers_rwnd > old_rwnd) {
4019 win_probe_recovery = 1;
4021 /* Now assure a timer where data is queued at */
4024 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4027 if (win_probe_recovery && (net->window_probe)) {
4028 win_probe_recovered = 1;
4030 * Find first chunk that was used with window probe
4031 * and clear the sent
4033 /* sa_ignore FREED_MEMORY */
4034 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4035 if (tp1->window_probe) {
4036 /* move back to data send queue */
4037 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4042 if (net->RTO == 0) {
4043 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4045 to_ticks = MSEC_TO_TICKS(net->RTO);
/* Flight on this net: keep/arm the T3-rtx timer; otherwise stop it. */
4047 if (net->flight_size) {
4049 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4050 sctp_timeout_handler, &net->rxt_timer);
4051 if (net->window_probe) {
4052 net->window_probe = 0;
4055 if (net->window_probe) {
4057 * In window probes we must assure a timer
4058 * is still running there
4060 net->window_probe = 0;
4061 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4062 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4063 sctp_timeout_handler, &net->rxt_timer);
4065 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4066 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4068 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4070 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4071 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4072 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4073 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4074 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight claims empty but sent queue isn't: audit and, if confirmed
 * broken, rebuild flight accounting from the queue itself. */
4080 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4081 (asoc->sent_queue_retran_cnt == 0) &&
4082 (win_probe_recovered == 0) &&
4085 * huh, this should not happen unless all packets are
4086 * PR-SCTP and marked to skip of course.
4088 if (sctp_fs_audit(asoc)) {
4089 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4090 net->flight_size = 0;
4092 asoc->total_flight = 0;
4093 asoc->total_flight_count = 0;
4094 asoc->sent_queue_retran_cnt = 0;
4095 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4096 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4097 sctp_flight_size_increase(tp1);
4098 sctp_total_flight_increase(stcb, tp1);
4099 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4100 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4107 /**********************************/
4108 /* Now what about shutdown issues */
4109 /**********************************/
4110 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4111 /* nothing left on sendqueue.. consider done */
4113 if ((asoc->stream_queue_cnt == 1) &&
4114 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4115 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4116 (asoc->locked_on_sending)
4118 struct sctp_stream_queue_pending *sp;
4121 * I may be in a state where we got all across.. but
4122 * cannot write more due to a shutdown... we abort
4123 * since the user did not indicate EOR in this case.
4124 * The sp will be cleaned during free of the asoc.
4126 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4128 if ((sp) && (sp->length == 0)) {
4129 /* Let cleanup code purge it */
4130 if (sp->msg_is_complete) {
4131 asoc->stream_queue_cnt--;
4133 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4134 asoc->locked_on_sending = NULL;
4135 asoc->stream_queue_cnt--;
4139 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4140 (asoc->stream_queue_cnt == 0)) {
4141 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4142 /* Need to abort here */
/* User never finished the message: abort with USER_INITIATED_ABT. */
4148 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4149 0, M_DONTWAIT, 1, MT_DATA);
4151 struct sctp_paramhdr *ph;
4154 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4156 ph = mtod(oper, struct sctp_paramhdr *);
4157 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4158 ph->param_length = htons(SCTP_BUF_LEN(oper));
4159 ippp = (uint32_t *) (ph + 1);
4160 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4162 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4163 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4165 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4166 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4167 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* All data delivered: move to SHUTDOWN-SENT and start shutdown timers. */
4169 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4170 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4171 sctp_stop_timers_for_shutdown(stcb);
4172 sctp_send_shutdown(stcb,
4173 stcb->asoc.primary_destination);
4174 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4175 stcb->sctp_ep, stcb, asoc->primary_destination);
4176 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4177 stcb->sctp_ep, stcb, asoc->primary_destination);
4179 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4180 (asoc->stream_queue_cnt == 0)) {
4181 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
/* Peer shut down first: answer with SHUTDOWN-ACK. */
4184 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4185 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4186 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4187 sctp_send_shutdown_ack(stcb,
4188 stcb->asoc.primary_destination);
4189 sctp_stop_timers_for_shutdown(stcb);
4190 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4191 stcb->sctp_ep, stcb, asoc->primary_destination);
4194 /*********************************************/
4195 /* Here we perform PR-SCTP procedures */
4197 /*********************************************/
4198 /* C1. update advancedPeerAckPoint */
4199 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4200 asoc->advanced_peer_ack_point = cumack;
4202 /* PR-Sctp issues need to be addressed too */
4203 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4204 struct sctp_tmit_chunk *lchk;
4205 uint32_t old_adv_peer_ack_point;
4207 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4208 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4209 /* C3. See if we need to send a Fwd-TSN */
4210 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4212 * ISSUE with ECN, see FWD-TSN processing.
4214 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4215 send_forward_tsn(stcb, asoc);
4217 /* try to FR fwd-tsn's that get lost too */
4218 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4219 send_forward_tsn(stcb, asoc);
4224 /* Assure a timer is up */
4225 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4226 stcb->sctp_ep, stcb, lchk->whoTo);
4229 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4230 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4232 stcb->asoc.peers_rwnd,
4233 stcb->asoc.total_flight,
4234 stcb->asoc.total_output_queue_size);
4239 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4240 struct sctp_tcb *stcb, struct sctp_nets *net_from,
4241 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4242 int *abort_now, uint8_t flags,
4243 uint32_t cum_ack, uint32_t rwnd)
4245 struct sctp_association *asoc;
4246 struct sctp_tmit_chunk *tp1, *tp2;
4247 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4248 uint32_t sav_cum_ack;
4249 uint16_t wake_him = 0;
4250 uint32_t send_s = 0;
4252 int accum_moved = 0;
4253 int will_exit_fast_recovery = 0;
4254 uint32_t a_rwnd, old_rwnd;
4255 int win_probe_recovery = 0;
4256 int win_probe_recovered = 0;
4257 struct sctp_nets *net = NULL;
4258 int ecn_seg_sums = 0;
4260 uint8_t reneged_all = 0;
4261 uint8_t cmt_dac_flag;
4264 * we take any chance we can to service our queues since we cannot
4265 * get awoken when the socket is read from :<
4268 * Now perform the actual SACK handling: 1) Verify that it is not an
4269 * old sack, if so discard. 2) If there is nothing left in the send
4270 * queue (cum-ack is equal to last acked) then you have a duplicate
4271 * too, update any rwnd change and verify no timers are running.
4272 * then return. 3) Process any new consequtive data i.e. cum-ack
4273 * moved process these first and note that it moved. 4) Process any
4274 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4275 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4276 * sync up flightsizes and things, stop all timers and also check
4277 * for shutdown_pending state. If so then go ahead and send off the
4278 * shutdown. If in shutdown recv, send off the shutdown-ack and
4279 * start that timer, Ret. 9) Strike any non-acked things and do FR
4280 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4281 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4282 * if in shutdown_recv state.
4284 SCTP_TCB_LOCK_ASSERT(stcb);
4286 this_sack_lowest_newack = 0;
4288 SCTP_STAT_INCR(sctps_slowpath_sack);
4290 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4291 #ifdef SCTP_ASOCLOG_OF_TSNS
4292 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4293 stcb->asoc.cumack_log_at++;
4294 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4295 stcb->asoc.cumack_log_at = 0;
4300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4301 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4302 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4304 old_rwnd = stcb->asoc.peers_rwnd;
4305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4306 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4307 stcb->asoc.overall_error_count,
4309 SCTP_FROM_SCTP_INDATA,
4312 stcb->asoc.overall_error_count = 0;
4314 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4315 sctp_log_sack(asoc->last_acked_seq,
4322 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4324 uint32_t *dupdata, dblock;
4326 for (i = 0; i < num_dup; i++) {
4327 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4328 sizeof(uint32_t), (uint8_t *) & dblock);
4329 if (dupdata == NULL) {
4332 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4335 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4337 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4338 tp1 = TAILQ_LAST(&asoc->sent_queue,
4339 sctpchunk_listhead);
4340 send_s = tp1->rec.data.TSN_seq + 1;
4343 send_s = asoc->sending_seq;
4345 if (SCTP_TSN_GE(cum_ack, send_s)) {
4349 * no way, we have not even sent this TSN out yet.
4350 * Peer is hopelessly messed up with us.
4352 printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4355 printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4356 tp1->rec.data.TSN_seq, tp1);
4361 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4362 0, M_DONTWAIT, 1, MT_DATA);
4364 struct sctp_paramhdr *ph;
4367 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4369 ph = mtod(oper, struct sctp_paramhdr *);
4370 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4371 ph->param_length = htons(SCTP_BUF_LEN(oper));
4372 ippp = (uint32_t *) (ph + 1);
4373 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4375 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4376 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4380 /**********************/
4381 /* 1) check the range */
4382 /**********************/
4383 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4384 /* acking something behind */
4387 sav_cum_ack = asoc->last_acked_seq;
4389 /* update the Rwnd of the peer */
4390 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4391 TAILQ_EMPTY(&asoc->send_queue) &&
4392 (asoc->stream_queue_cnt == 0)) {
4393 /* nothing left on send/sent and strmq */
4394 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4395 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4396 asoc->peers_rwnd, 0, 0, a_rwnd);
4398 asoc->peers_rwnd = a_rwnd;
4399 if (asoc->sent_queue_retran_cnt) {
4400 asoc->sent_queue_retran_cnt = 0;
4402 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4403 /* SWS sender side engages */
4404 asoc->peers_rwnd = 0;
4406 /* stop any timers */
4407 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4408 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4409 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4410 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4411 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4412 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4413 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4414 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4417 net->partial_bytes_acked = 0;
4418 net->flight_size = 0;
4420 asoc->total_flight = 0;
4421 asoc->total_flight_count = 0;
4425 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4426 * things. The total byte count acked is tracked in netAckSz AND
4427 * netAck2 is used to track the total bytes acked that are un-
4428 * amibguious and were never retransmitted. We track these on a per
4429 * destination address basis.
4431 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4432 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4433 /* Drag along the window_tsn for cwr's */
4434 net->cwr_window_tsn = cum_ack;
4436 net->prev_cwnd = net->cwnd;
4441 * CMT: Reset CUC and Fast recovery algo variables before
4444 net->new_pseudo_cumack = 0;
4445 net->will_exit_fast_recovery = 0;
4447 /* process the new consecutive TSN first */
4448 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4449 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4450 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4452 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4454 * If it is less than ACKED, it is
4455 * now no-longer in flight. Higher
4456 * values may occur during marking
4458 if ((tp1->whoTo->dest_state &
4459 SCTP_ADDR_UNCONFIRMED) &&
4460 (tp1->snd_count < 2)) {
4462 * If there was no retran
4463 * and the address is
4464 * un-confirmed and we sent
4466 * sacked.. its confirmed,
4469 tp1->whoTo->dest_state &=
4470 ~SCTP_ADDR_UNCONFIRMED;
4472 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4473 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4474 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4475 tp1->whoTo->flight_size,
4477 (uintptr_t) tp1->whoTo,
4478 tp1->rec.data.TSN_seq);
4480 sctp_flight_size_decrease(tp1);
4481 sctp_total_flight_decrease(stcb, tp1);
4483 tp1->whoTo->net_ack += tp1->send_size;
4485 /* CMT SFR and DAC algos */
4486 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4487 tp1->whoTo->saw_newack = 1;
4489 if (tp1->snd_count < 2) {
4491 * True non-retransmited
4494 tp1->whoTo->net_ack2 +=
4497 /* update RTO too? */
4500 sctp_calculate_rto(stcb,
4502 &tp1->sent_rcv_time,
4503 sctp_align_safe_nocopy);
4508 * CMT: CUCv2 algorithm. From the
4509 * cumack'd TSNs, for each TSN being
4510 * acked for the first time, set the
4511 * following variables for the
4512 * corresp destination.
4513 * new_pseudo_cumack will trigger a
4515 * find_(rtx_)pseudo_cumack will
4516 * trigger search for the next
4517 * expected (rtx-)pseudo-cumack.
4519 tp1->whoTo->new_pseudo_cumack = 1;
4520 tp1->whoTo->find_pseudo_cumack = 1;
4521 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4524 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4525 sctp_log_sack(asoc->last_acked_seq,
4527 tp1->rec.data.TSN_seq,
4530 SCTP_LOG_TSN_ACKED);
4532 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4533 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4536 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4537 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4538 #ifdef SCTP_AUDITING_ENABLED
4539 sctp_audit_log(0xB3,
4540 (asoc->sent_queue_retran_cnt & 0x000000ff));
4543 if (tp1->rec.data.chunk_was_revoked) {
4544 /* deflate the cwnd */
4545 tp1->whoTo->cwnd -= tp1->book_size;
4546 tp1->rec.data.chunk_was_revoked = 0;
4548 tp1->sent = SCTP_DATAGRAM_ACKED;
4554 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4555 /* always set this up to cum-ack */
4556 asoc->this_sack_highest_gap = last_tsn;
4558 if ((num_seg > 0) || (num_nr_seg > 0)) {
4561 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4562 * to be greater than the cumack. Also reset saw_newack to 0
4565 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4566 net->saw_newack = 0;
4567 net->this_sack_highest_newack = last_tsn;
4571 * thisSackHighestGap will increase while handling NEW
4572 * segments this_sack_highest_newack will increase while
4573 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4574 * used for CMT DAC algo. saw_newack will also change.
4576 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4577 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4578 num_seg, num_nr_seg, &ecn_seg_sums)) {
4581 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4583 * validate the biggest_tsn_acked in the gap acks if
4584 * strict adherence is wanted.
4586 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4588 * peer is either confused or we are under
4589 * attack. We must abort.
4591 printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4599 /*******************************************/
4600 /* cancel ALL T3-send timer if accum moved */
4601 /*******************************************/
4602 if (asoc->sctp_cmt_on_off > 0) {
4603 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4604 if (net->new_pseudo_cumack)
4605 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4607 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4612 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4613 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4614 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4618 /********************************************/
4619 /* drop the acked chunks from the sentqueue */
4620 /********************************************/
4621 asoc->last_acked_seq = cum_ack;
4623 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4624 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4627 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4628 /* no more sent on list */
4629 printf("Warning, tp1->sent == %d and its now acked?\n",
4632 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4633 if (tp1->pr_sctp_on) {
4634 if (asoc->pr_sctp_cnt != 0)
4635 asoc->pr_sctp_cnt--;
4637 asoc->sent_queue_cnt--;
4639 /* sa_ignore NO_NULL_CHK */
4640 sctp_free_bufspace(stcb, asoc, tp1, 1);
4641 sctp_m_freem(tp1->data);
4643 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4644 asoc->sent_queue_cnt_removeable--;
4647 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4648 sctp_log_sack(asoc->last_acked_seq,
4650 tp1->rec.data.TSN_seq,
4653 SCTP_LOG_FREE_SENT);
4655 sctp_free_a_chunk(stcb, tp1);
4658 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4660 panic("Warning flight size is postive and should be 0");
4662 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4663 asoc->total_flight);
4665 asoc->total_flight = 0;
4667 /* sa_ignore NO_NULL_CHK */
4668 if ((wake_him) && (stcb->sctp_socket)) {
4669 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4673 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4674 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4675 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4677 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4678 so = SCTP_INP_SO(stcb->sctp_ep);
4679 atomic_add_int(&stcb->asoc.refcnt, 1);
4680 SCTP_TCB_UNLOCK(stcb);
4681 SCTP_SOCKET_LOCK(so, 1);
4682 SCTP_TCB_LOCK(stcb);
4683 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4684 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4685 /* assoc was freed while we were unlocked */
4686 SCTP_SOCKET_UNLOCK(so, 1);
4690 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4691 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4692 SCTP_SOCKET_UNLOCK(so, 1);
4695 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4696 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4700 if (asoc->fast_retran_loss_recovery && accum_moved) {
4701 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4702 /* Setup so we will exit RFC2582 fast recovery */
4703 will_exit_fast_recovery = 1;
4707 * Check for revoked fragments:
4709 * if Previous sack - Had no frags then we can't have any revoked if
4710 * Previous sack - Had frag's then - If we now have frags aka
4711 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4712 * some of them. else - The peer revoked all ACKED fragments, since
4713 * we had some before and now we have NONE.
4717 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4718 asoc->saw_sack_with_frags = 1;
4719 } else if (asoc->saw_sack_with_frags) {
4720 int cnt_revoked = 0;
4722 /* Peer revoked all dg's marked or acked */
4723 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4724 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4725 tp1->sent = SCTP_DATAGRAM_SENT;
4726 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4727 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4728 tp1->whoTo->flight_size,
4730 (uintptr_t) tp1->whoTo,
4731 tp1->rec.data.TSN_seq);
4733 sctp_flight_size_increase(tp1);
4734 sctp_total_flight_increase(stcb, tp1);
4735 tp1->rec.data.chunk_was_revoked = 1;
4737 * To ensure that this increase in
4738 * flightsize, which is artificial, does not
4739 * throttle the sender, we also increase the
4740 * cwnd artificially.
4742 tp1->whoTo->cwnd += tp1->book_size;
4749 asoc->saw_sack_with_frags = 0;
4752 asoc->saw_sack_with_nr_frags = 1;
4754 asoc->saw_sack_with_nr_frags = 0;
4756 /* JRS - Use the congestion control given in the CC module */
4757 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4759 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4760 /* nothing left in-flight */
4761 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4762 /* stop all timers */
4763 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4764 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4765 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4766 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4767 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4770 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4771 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4772 net->flight_size = 0;
4773 net->partial_bytes_acked = 0;
4775 asoc->total_flight = 0;
4776 asoc->total_flight_count = 0;
4778 /**********************************/
4779 /* Now what about shutdown issues */
4780 /**********************************/
4781 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4782 /* nothing left on sendqueue.. consider done */
4783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4784 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4785 asoc->peers_rwnd, 0, 0, a_rwnd);
4787 asoc->peers_rwnd = a_rwnd;
4788 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4789 /* SWS sender side engages */
4790 asoc->peers_rwnd = 0;
4793 if ((asoc->stream_queue_cnt == 1) &&
4794 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4795 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4796 (asoc->locked_on_sending)
4798 struct sctp_stream_queue_pending *sp;
4801 * I may be in a state where we got all across.. but
4802 * cannot write more due to a shutdown... we abort
4803 * since the user did not indicate EOR in this case.
4805 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4807 if ((sp) && (sp->length == 0)) {
4808 asoc->locked_on_sending = NULL;
4809 if (sp->msg_is_complete) {
4810 asoc->stream_queue_cnt--;
4812 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4813 asoc->stream_queue_cnt--;
4817 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4818 (asoc->stream_queue_cnt == 0)) {
4819 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4820 /* Need to abort here */
4826 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4827 0, M_DONTWAIT, 1, MT_DATA);
4829 struct sctp_paramhdr *ph;
4832 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4834 ph = mtod(oper, struct sctp_paramhdr *);
4835 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4836 ph->param_length = htons(SCTP_BUF_LEN(oper));
4837 ippp = (uint32_t *) (ph + 1);
4838 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4840 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4841 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4844 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4845 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4846 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4848 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4849 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4850 sctp_stop_timers_for_shutdown(stcb);
4851 sctp_send_shutdown(stcb,
4852 stcb->asoc.primary_destination);
4853 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4854 stcb->sctp_ep, stcb, asoc->primary_destination);
4855 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4856 stcb->sctp_ep, stcb, asoc->primary_destination);
4859 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4860 (asoc->stream_queue_cnt == 0)) {
4861 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4864 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4865 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4866 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4867 sctp_send_shutdown_ack(stcb,
4868 stcb->asoc.primary_destination);
4869 sctp_stop_timers_for_shutdown(stcb);
4870 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4871 stcb->sctp_ep, stcb, asoc->primary_destination);
4876 * Now here we are going to recycle net_ack for a different use...
4879 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4884 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4885 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4886 * automatically ensure that.
4888 if ((asoc->sctp_cmt_on_off > 0) &&
4889 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4890 (cmt_dac_flag == 0)) {
4891 this_sack_lowest_newack = cum_ack;
4893 if ((num_seg > 0) || (num_nr_seg > 0)) {
4894 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4895 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4897 /* JRS - Use the congestion control given in the CC module */
4898 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4900 /* Now are we exiting loss recovery ? */
4901 if (will_exit_fast_recovery) {
4902 /* Ok, we must exit fast recovery */
4903 asoc->fast_retran_loss_recovery = 0;
4905 if ((asoc->sat_t3_loss_recovery) &&
4906 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4907 /* end satellite t3 loss recovery */
4908 asoc->sat_t3_loss_recovery = 0;
4913 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4914 if (net->will_exit_fast_recovery) {
4915 /* Ok, we must exit fast recovery */
4916 net->fast_retran_loss_recovery = 0;
4920 /* Adjust and set the new rwnd value */
4921 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4922 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4923 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4925 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4926 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4927 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4928 /* SWS sender side engages */
4929 asoc->peers_rwnd = 0;
4931 if (asoc->peers_rwnd > old_rwnd) {
4932 win_probe_recovery = 1;
4935 * Now we must setup so we have a timer up for anyone with
4941 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4942 if (win_probe_recovery && (net->window_probe)) {
4943 win_probe_recovered = 1;
4945 * Find first chunk that was used with
4946 * window probe and clear the event. Put
4947 * it back into the send queue as if has
4950 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4951 if (tp1->window_probe) {
4952 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4957 if (net->flight_size) {
4959 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4960 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4961 stcb->sctp_ep, stcb, net);
4963 if (net->window_probe) {
4964 net->window_probe = 0;
4967 if (net->window_probe) {
4969 * In window probes we must assure a timer
4970 * is still running there
4972 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4973 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4974 stcb->sctp_ep, stcb, net);
4977 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4978 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4980 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4982 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4983 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4984 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4985 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4986 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4992 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4993 (asoc->sent_queue_retran_cnt == 0) &&
4994 (win_probe_recovered == 0) &&
4997 * huh, this should not happen unless all packets are
4998 * PR-SCTP and marked to skip of course.
5000 if (sctp_fs_audit(asoc)) {
5001 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5002 net->flight_size = 0;
5004 asoc->total_flight = 0;
5005 asoc->total_flight_count = 0;
5006 asoc->sent_queue_retran_cnt = 0;
5007 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5008 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5009 sctp_flight_size_increase(tp1);
5010 sctp_total_flight_increase(stcb, tp1);
5011 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5012 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5019 /*********************************************/
5020 /* Here we perform PR-SCTP procedures */
5022 /*********************************************/
5023 /* C1. update advancedPeerAckPoint */
5024 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5025 asoc->advanced_peer_ack_point = cum_ack;
5027 /* C2. try to further move advancedPeerAckPoint ahead */
5028 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5029 struct sctp_tmit_chunk *lchk;
5030 uint32_t old_adv_peer_ack_point;
5032 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5033 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5034 /* C3. See if we need to send a Fwd-TSN */
5035 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5037 * ISSUE with ECN, see FWD-TSN processing.
5039 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5040 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5041 0xee, cum_ack, asoc->advanced_peer_ack_point,
5042 old_adv_peer_ack_point);
5044 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5045 send_forward_tsn(stcb, asoc);
5047 /* try to FR fwd-tsn's that get lost too */
5048 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5049 send_forward_tsn(stcb, asoc);
5054 /* Assure a timer is up */
5055 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5056 stcb->sctp_ep, stcb, lchk->whoTo);
5059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5060 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5062 stcb->asoc.peers_rwnd,
5063 stcb->asoc.total_flight,
5064 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a SHUTDOWN chunk (cp).
 * The peer's SHUTDOWN acks data just like a SACK with no gap reports,
 * so we synthesize the arguments and reuse the express SACK path.
 * *abort_flag is set by the callee if the association must be torn down.
 * NOTE(review): netp is not referenced in the visible body here.
 */
5069 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5070 struct sctp_nets *netp, int *abort_flag)
5073 uint32_t cum_ack, a_rwnd;
/* Cumulative TSN arrives in network byte order on the wire. */
5075 cum_ack = ntohl(cp->cumulative_tsn_ack);
5076 /* Arrange so a_rwnd does NOT change */
/*
 * The SACK handler recomputes peers_rwnd as (a_rwnd - total_flight);
 * feeding it peers_rwnd + total_flight leaves the window unchanged.
 */
5077 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5079 /* Now call the express sack handling */
5080 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag);
/*
 * After a FWD-TSN has advanced the delivered sequence number on a
 * PR-SCTP in-stream, push out everything in this stream's re-order
 * queue that is now deliverable: first anything at or before the new
 * last_sequence_delivered, then any in-order run that follows it.
 * Caller holds the TCB lock and the INP read lock (chunks are queued
 * with SCTP_READ_LOCK_HELD).
 */
5084 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5085 struct sctp_stream_in *strmin)
5087 struct sctp_queued_to_read *ctl, *nctl;
5088 struct sctp_association *asoc;
/* Start from the SSN the FWD-TSN processing already advanced us to. */
5092 tt = strmin->last_sequence_delivered;
5094 * First deliver anything prior to and including the stream no that
/* Safe iteration: entries are removed from the queue while walking. */
5097 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5098 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5099 /* this is deliverable now */
5100 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5101 /* subtract pending on streams */
5102 asoc->size_on_all_streams -= ctl->length;
5103 sctp_ucount_decr(asoc->cnt_on_all_streams);
5104 /* deliver it to at least the delivery-q */
5105 if (stcb->sctp_socket) {
/* This TSN is consumed; it can no longer be renege'd on. */
5106 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5107 sctp_add_to_readq(stcb->sctp_ep, stcb,
5109 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5112 /* no more delivery now. */
5117 * now we must deliver things in queue the normal way if any are
/* Second pass: deliver any strictly in-order run that now follows. */
5120 tt = strmin->last_sequence_delivered + 1;
5121 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5122 if (tt == ctl->sinfo_ssn) {
5123 /* this is deliverable now */
5124 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5125 /* subtract pending on streams */
5126 asoc->size_on_all_streams -= ctl->length;
5127 sctp_ucount_decr(asoc->cnt_on_all_streams);
5128 /* deliver it to at least the delivery-q */
/* Advance the stream's delivery point as each SSN goes up. */
5129 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5130 if (stcb->sctp_socket) {
5131 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5132 sctp_add_to_readq(stcb->sctp_ep, stcb,
5134 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Next expected SSN for the in-order run. */
5137 tt = strmin->last_sequence_delivered + 1;
/*
 * Purge from the reassembly queue every fragment belonging to the
 * ordered message (stream, seq) that a received FWD-TSN tells us the
 * peer has abandoned.  Fragments on other streams, or marked unordered
 * (whose stream sequence number is meaningless), are left alone.
 * Bookkeeping (size_on_reasm_queue, cnt_on_reasm_queue, pdapi markers,
 * last_sequence_delivered) is updated as chunks are freed.
 */
5145 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5146 struct sctp_association *asoc,
5147 uint16_t stream, uint16_t seq)
5149 struct sctp_tmit_chunk *chk, *nchk;
5151 /* For each one on here see if we need to toss it */
5153 * For now large messages held on the reasmqueue that are complete
5154 * will be tossed too. We could in theory do more work to spin
5155 * through and stop after dumping one msg aka seeing the start of a
5156 * new msg at the head, and call the delivery function... to see if
5157 * it can be delivered... But for now we just dump everything on the
/* Safe iteration: matching chunks are unlinked and freed in the loop. */
5160 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5162 * Do not toss it if on a different stream or marked for
5163 * unordered delivery in which case the stream sequence
5164 * number has no meaning.
5166 if ((chk->rec.data.stream_number != stream) ||
5167 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5170 if (chk->rec.data.stream_seq == seq) {
5171 /* It needs to be tossed */
5172 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* Track the highest tossed TSN as "delivered" for PD-API state. */
5173 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5174 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5175 asoc->str_of_pdapi = chk->rec.data.stream_number;
5176 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5177 asoc->fragment_flags = chk->rec.data.rcv_flags;
5179 asoc->size_on_reasm_queue -= chk->send_size;
5180 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5182 /* Clear up any stream problem */
5183 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5184 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5186 * We must dump forward this streams
5187 * sequence number if the chunk is not
5188 * unordered that is being skipped. There is
5189 * a chance that if the peer does not
5190 * include the last fragment in its FWD-TSN
5191 * we WILL have a problem here since you
5192 * would have a partial chunk in queue that
5193 * may not be deliverable. Also if a Partial
5194 * delivery API as started the user may get
5195 * a partial chunk. The next read returning
5196 * a new chunk... really ugly but I see no
5197 * way around it! Maybe a notify??
5199 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5202 sctp_m_freem(chk->data);
5205 sctp_free_a_chunk(stcb, chk);
/* Queue is SSN-ordered: once past the victim SSN we can stop early. */
5206 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5208 * If the stream_seq is > than the purging one, we
5218 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5219 struct sctp_forward_tsn_chunk *fwd,
5220 int *abort_flag, struct mbuf *m, int offset)
5222 /* The pr-sctp fwd tsn */
5224 * here we will perform all the data receiver side steps for
5225 * processing FwdTSN, as required in by pr-sctp draft:
5227 * Assume we get FwdTSN(x):
5229 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5230 * others we have 3) examine and update re-ordering queue on
5231 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5232 * report where we are.
5234 struct sctp_association *asoc;
5235 uint32_t new_cum_tsn, gap;
5236 unsigned int i, fwd_sz, cumack_set_flag, m_size;
5238 struct sctp_stream_in *strm;
5239 struct sctp_tmit_chunk *chk, *nchk;
5240 struct sctp_queued_to_read *ctl, *sv;
5242 cumack_set_flag = 0;
5244 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5245 SCTPDBG(SCTP_DEBUG_INDATA1,
5246 "Bad size too small/big fwd-tsn\n");
5249 m_size = (stcb->asoc.mapping_array_size << 3);
5250 /*************************************************************/
5251 /* 1. Here we update local cumTSN and shift the bitmap array */
5252 /*************************************************************/
5253 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5255 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5256 /* Already got there ... */
5260 * now we know the new TSN is more advanced, let's find the actual
5263 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5264 asoc->cumulative_tsn = new_cum_tsn;
5265 if (gap >= m_size) {
5266 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5270 * out of range (of single byte chunks in the rwnd I
5271 * give out). This must be an attacker.
5274 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5275 0, M_DONTWAIT, 1, MT_DATA);
5277 struct sctp_paramhdr *ph;
5280 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5281 (sizeof(uint32_t) * 3);
5282 ph = mtod(oper, struct sctp_paramhdr *);
5283 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5284 ph->param_length = htons(SCTP_BUF_LEN(oper));
5285 ippp = (uint32_t *) (ph + 1);
5286 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5288 *ippp = asoc->highest_tsn_inside_map;
5290 *ippp = new_cum_tsn;
5292 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5293 sctp_abort_an_association(stcb->sctp_ep, stcb,
5294 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5297 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5299 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5300 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5301 asoc->highest_tsn_inside_map = new_cum_tsn;
5303 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5304 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5306 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5307 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5310 SCTP_TCB_LOCK_ASSERT(stcb);
5311 for (i = 0; i <= gap; i++) {
5312 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5313 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5314 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5315 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5316 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5321 /*************************************************************/
5322 /* 2. Clear up re-assembly queue */
5323 /*************************************************************/
5325 * First service it if pd-api is up, just in case we can progress it
5328 if (asoc->fragmented_delivery_inprogress) {
5329 sctp_service_reassembly(stcb, asoc);
5331 /* For each one on here see if we need to toss it */
5333 * For now large messages held on the reasmqueue that are complete
5334 * will be tossed too. We could in theory do more work to spin
5335 * through and stop after dumping one msg aka seeing the start of a
5336 * new msg at the head, and call the delivery function... to see if
5337 * it can be delivered... But for now we just dump everything on the
5340 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5341 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5342 /* It needs to be tossed */
5343 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5344 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5345 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5346 asoc->str_of_pdapi = chk->rec.data.stream_number;
5347 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5348 asoc->fragment_flags = chk->rec.data.rcv_flags;
5350 asoc->size_on_reasm_queue -= chk->send_size;
5351 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5353 /* Clear up any stream problem */
5354 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5355 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5357 * We must dump forward this streams
5358 * sequence number if the chunk is not
5359 * unordered that is being skipped. There is
5360 * a chance that if the peer does not
5361 * include the last fragment in its FWD-TSN
5362 * we WILL have a problem here since you
5363 * would have a partial chunk in queue that
5364 * may not be deliverable. Also if a Partial
5365 * delivery API as started the user may get
5366 * a partial chunk. The next read returning
5367 * a new chunk... really ugly but I see no
5368 * way around it! Maybe a notify??
5370 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5373 sctp_m_freem(chk->data);
5376 sctp_free_a_chunk(stcb, chk);
5379 * Ok we have gone beyond the end of the fwd-tsn's
5385 /*******************************************************/
5386 /* 3. Update the PR-stream re-ordering queues and fix */
5387 /* delivery issues as needed. */
5388 /*******************************************************/
5389 fwd_sz -= sizeof(*fwd);
5392 unsigned int num_str;
5393 struct sctp_strseq *stseq, strseqbuf;
5395 offset += sizeof(*fwd);
5397 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5398 num_str = fwd_sz / sizeof(struct sctp_strseq);
5399 for (i = 0; i < num_str; i++) {
5402 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5403 sizeof(struct sctp_strseq),
5404 (uint8_t *) & strseqbuf);
5405 offset += sizeof(struct sctp_strseq);
5406 if (stseq == NULL) {
5410 st = ntohs(stseq->stream);
5412 st = ntohs(stseq->sequence);
5413 stseq->sequence = st;
5418 * Ok we now look for the stream/seq on the read
5419 * queue where its not all delivered. If we find it
5420 * we transmute the read entry into a PDI_ABORTED.
5422 if (stseq->stream >= asoc->streamincnt) {
5423 /* screwed up streams, stop! */
5426 if ((asoc->str_of_pdapi == stseq->stream) &&
5427 (asoc->ssn_of_pdapi == stseq->sequence)) {
5429 * If this is the one we were partially
5430 * delivering now then we no longer are.
5431 * Note this will change with the reassembly
5434 asoc->fragmented_delivery_inprogress = 0;
5436 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5437 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5438 if ((ctl->sinfo_stream == stseq->stream) &&
5439 (ctl->sinfo_ssn == stseq->sequence)) {
5440 str_seq = (stseq->stream << 16) | stseq->sequence;
5442 ctl->pdapi_aborted = 1;
5443 sv = stcb->asoc.control_pdapi;
5444 stcb->asoc.control_pdapi = ctl;
5445 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5447 SCTP_PARTIAL_DELIVERY_ABORTED,
5449 SCTP_SO_NOT_LOCKED);
5450 stcb->asoc.control_pdapi = sv;
5452 } else if ((ctl->sinfo_stream == stseq->stream) &&
5453 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5454 /* We are past our victim SSN */
5458 strm = &asoc->strmin[stseq->stream];
5459 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5460 /* Update the sequence number */
5461 strm->last_sequence_delivered = stseq->sequence;
5463 /* now kick the stream the new way */
5464 /* sa_ignore NO_NULL_CHK */
5465 sctp_kick_prsctp_reorder_queue(stcb, strm);
5467 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5470 * Now slide thing forward.
5472 sctp_slide_mapping_arrays(stcb);
5474 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5475 /* now lets kick out and check for more fragmented delivery */
5476 /* sa_ignore NO_NULL_CHK */
5477 sctp_deliver_reasm_check(stcb, &stcb->asoc);