2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * sctp_set_rwnd: refresh the association's cached advertised receive
 * window (asoc->my_rwnd) from sctp_calc_rwnd().
 * NOTE(review): this extract is missing lines (return type, braces);
 * embedded numbers are the original file's line numbers. Code kept
 * byte-identical.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * sctp_calc_rwnd: compute the receive window to advertise to the peer,
 * based on socket receive-buffer space minus data still held on the
 * reassembly and per-stream queues (plus per-chunk MSIZE overhead).
 * NOTE(review): several lines (returns, else branches, closing braces)
 * are missing from this extract; code below kept byte-identical.
 */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
/* Guard: no socket attached — presumably returns 0 here (line dropped). */
77 if (stcb->sctp_socket == NULL)
/* Nothing buffered anywhere: grant the full window. */
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
/* Subtract reassembly-queue and stream-queue holdings; each queued
 * chunk is also charged MSIZE of mbuf bookkeeping overhead. */
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
/* Silly-Window-Syndrome avoidance: clamp tiny windows (body dropped). */
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
/*
 * sctp_build_readq_entry: allocate and fully initialize a
 * sctp_queued_to_read entry describing one incoming message, taking a
 * reference on the source net. Returns NULL on allocation failure.
 * NOTE(review): the parameter line declaring the data mbuf `dm` (and
 * the braces / NULL-return body) are missing from this extract.
 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
/* sinfo_flags carries the chunk flags in its high byte (cmsg layout). */
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = stcb->asoc.context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
/* Hold a reference on the net this entry points at. */
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
/*
 * sctp_build_readq_entry_chk: like sctp_build_readq_entry(), but fills
 * the read-queue entry from an existing sctp_tmit_chunk on the
 * reassembly path. Takes a reference on chk->whoTo; returns NULL on
 * allocation failure (return body missing from this extract).
 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
/* High byte of sinfo_flags carries the chunk's receive flags. */
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
/* Entry now references the chunk's destination net. */
186 atomic_add_int(&chk->whoTo->ref_count, 1);
/* Ownership of chk->data effectively moves to the readq entry. */
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
/*
 * sctp_build_ctl_nchunk: build an mbuf holding a cmsghdr +
 * sctp_sndrcvinfo (or the larger sctp_extrcvinfo when the
 * EXT_RCVINFO socket feature is on) describing a received message.
 * Returns NULL early if the user disabled RECVDATAIOEVNT.
 * NOTE(review): declarations of `cmh`, `ret`, `len` and several
 * return/else lines are missing from this extract.
 */
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
203 struct sctp_sndrcvinfo *sinfo)
205 struct sctp_sndrcvinfo *outinfo;
209 int use_extended = 0;
211 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
212 /* user does not want the sndrcv ctl */
/* Pick the cmsg payload size based on the EXT_RCVINFO feature flag. */
215 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
217 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
219 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
223 ret = sctp_get_mbuf_for_msg(len,
224 0, M_DONTWAIT, 1, MT_DATA);
230 /* We need a CMSG header followed by the struct */
231 cmh = mtod(ret, struct cmsghdr *);
232 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
233 cmh->cmsg_level = IPPROTO_SCTP;
235 cmh->cmsg_type = SCTP_EXTRCV;
/* memcpy uses the full cmsg len chosen above (extended or plain). */
237 memcpy(outinfo, sinfo, len);
239 cmh->cmsg_type = SCTP_SNDRCV;
243 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
/*
 * sctp_build_ctl_cchunk: same contract as sctp_build_ctl_nchunk(), but
 * writes the cmsghdr + sndrcvinfo into a SCTP_MALLOC'ed flat buffer
 * instead of an mbuf.
 * NOTE(review): declarations (`buf`, `cmh`, `len`), the malloc-failure
 * check, cmsg_len assignment and return statements are missing from
 * this extract; code kept byte-identical.
 */
249 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
251 struct sctp_sndrcvinfo *sinfo)
253 struct sctp_sndrcvinfo *outinfo;
257 int use_extended = 0;
259 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
260 /* user does not want the sndrcv ctl */
263 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
265 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
267 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
269 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
274 /* We need a CMSG header followed by the struct */
275 cmh = (struct cmsghdr *)buf;
276 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
277 cmh->cmsg_level = IPPROTO_SCTP;
279 cmh->cmsg_type = SCTP_EXTRCV;
281 memcpy(outinfo, sinfo, len);
283 cmh->cmsg_type = SCTP_SNDRCV;
/*
 * sctp_mark_non_revokable: move `tsn` from the revokable mapping array
 * into the non-revokable (nr) mapping array, so a later drain/revoke
 * cannot take it back. Updates the highest-TSN trackers for both maps.
 * No-op when the sctp_do_drain sysctl is off or the TSN is already at
 * or below the cumulative ack.
 * NOTE(review): returns/braces are missing from this extract.
 */
292 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
294 uint32_t gap, i, cumackp1;
297 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
300 cumackp1 = asoc->cumulative_tsn + 1;
301 if (SCTP_TSN_GT(cumackp1, tsn)) {
303 * this tsn is behind the cum ack and thus we don't need to
304 * worry about it being moved from one to the other.
308 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
/* A TSN we are marking must already be present in the revokable map;
 * anything else indicates corrupted state (debug builds panic). */
309 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
310 printf("gap:%x tsn:%x\n", gap, tsn);
311 sctp_print_mapping_array(asoc);
313 panic("Things are really messed up now!!");
/* Flip the bit: set in nr map, clear in the revokable map. */
316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
318 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
319 asoc->highest_tsn_inside_nr_map = tsn;
321 if (tsn == asoc->highest_tsn_inside_map) {
322 /* We must back down to see what the new highest is */
323 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
324 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
325 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
326 asoc->highest_tsn_inside_map = i;
/* Nothing left in the revokable map below base: reset marker. */
332 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
339 * We are delivering currently from the reassembly queue. We must continue to
340 * deliver until we either: 1) run out of space. 2) run out of sequential
341 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * sctp_service_reassembly: drain sequential fragments from
 * asoc->reasmqueue into the socket receive buffer (partial-delivery
 * API). If the socket is gone/closing, the queue is torn down instead.
 * After a completed message, any now-in-order queued stream messages
 * are also pushed to the readq.
 * NOTE(review): this extract has many dropped lines (declarations of
 * cntDel/stream_no/nxt_todel/end, break/continue statements, braces);
 * code kept byte-identical — do not restyle from this view.
 */
344 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
346 struct sctp_tmit_chunk *chk, *nchk;
351 struct sctp_queued_to_read *control, *ctl, *nctl;
356 cntDel = stream_no = 0;
/* Socket gone or association closing: free everything queued. */
357 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
358 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
359 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
360 /* socket above is long gone or going.. */
362 asoc->fragmented_delivery_inprogress = 0;
363 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
364 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
365 asoc->size_on_reasm_queue -= chk->send_size;
366 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
368 * Lose the data pointer, since its in the socket
372 sctp_m_freem(chk->data);
375 /* Now free the address and data */
376 sctp_free_a_chunk(stcb, chk);
377 /* sa_ignore FREED_MEMORY */
381 SCTP_TCB_LOCK_ASSERT(stcb);
/* Main delivery loop: only strictly sequential TSNs can go up. */
382 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
384 /* Can't deliver more :< */
387 stream_no = chk->rec.data.stream_number;
388 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
389 if (nxt_todel != chk->rec.data.stream_seq &&
390 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
392 * Not the next sequence to deliver in its stream OR
/* FIRST fragment: create the pdapi control and hand it to the readq. */
397 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
399 control = sctp_build_readq_entry_chk(stcb, chk);
400 if (control == NULL) {
404 /* save it off for our future deliveries */
405 stcb->asoc.control_pdapi = control;
406 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
410 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
411 sctp_add_to_readq(stcb->sctp_ep,
412 stcb, control, &stcb->sctp_socket->so_rcv, end,
413 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment: append data to the in-progress pdapi entry. */
416 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
420 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
421 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
422 stcb->asoc.control_pdapi,
423 chk->data, end, chk->rec.data.TSN_seq,
424 &stcb->sctp_socket->so_rcv)) {
426 * something is very wrong, either
427 * control_pdapi is NULL, or the tail_mbuf
428 * is corrupt, or there is a EOM already on
431 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
/* Debug kernels panic on this inconsistency ... */
435 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
436 panic("This should not happen control_pdapi NULL?");
438 /* if we did not panic, it was a EOM */
439 panic("Bad chunking ??");
/* ... while production kernels log and dump the reasm queue. */
441 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
442 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
444 SCTP_PRINTF("Bad chunking ??\n");
445 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
453 /* pull it we did it */
454 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
455 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* Message complete: leave pdapi mode, advance the stream's SSN. */
456 asoc->fragmented_delivery_inprogress = 0;
457 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
458 asoc->strmin[stream_no].last_sequence_delivered++;
460 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
461 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
463 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
465 * turn the flag back on since we just delivered
468 asoc->fragmented_delivery_inprogress = 1;
/* Remember what we last delivered for later audits. */
470 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
471 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
472 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
473 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
475 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
476 asoc->size_on_reasm_queue -= chk->send_size;
477 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
478 /* free up the chk */
480 sctp_free_a_chunk(stcb, chk);
482 if (asoc->fragmented_delivery_inprogress == 0) {
484 * Now lets see if we can deliver the next one on
487 struct sctp_stream_in *strm;
489 strm = &asoc->strmin[stream_no];
490 nxt_todel = strm->last_sequence_delivered + 1;
/* Flush any queued stream messages that are now in order. */
491 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
492 /* Deliver more if we can. */
493 if (nxt_todel == ctl->sinfo_ssn) {
494 TAILQ_REMOVE(&strm->inqueue, ctl, next);
495 asoc->size_on_all_streams -= ctl->length;
496 sctp_ucount_decr(asoc->cnt_on_all_streams);
497 strm->last_sequence_delivered++;
498 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
499 sctp_add_to_readq(stcb->sctp_ep, stcb,
501 &stcb->sctp_socket->so_rcv, 1,
502 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
506 nxt_todel = strm->last_sequence_delivered + 1;
514 * Queue the chunk either right into the socket buffer if it is the next one
515 * to go OR put it in the correct place in the delivery queue. If we do
516 * append to the so_buf, keep doing so until we are out of order. One big
517 * question still remains, what to do when the socket buffer is FULL??
/*
 * sctp_queue_data_to_stream: deliver `control` immediately when its SSN
 * is the next expected for its stream (then drain any follow-on
 * in-order entries), otherwise SSN-sort it into strm->inqueue. A
 * peer sending an SSN at or behind the last-delivered SSN is a
 * protocol violation: the association is aborted and *abort_flag set.
 * NOTE(review): many lines are missing from this extract (declarations
 * of nxt_todel/oper/queue_needed, break/return statements, braces);
 * code kept byte-identical — not safe to restyle from this view.
 */
520 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
521 struct sctp_queued_to_read *control, int *abort_flag)
524 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
525 * all the data in one stream this could happen quite rapidly. One
526 * could use the TSN to keep track of things, but this scheme breaks
527 * down in the other type of stream useage that could occur. Send a
528 * single msg to stream 0, send 4Billion messages to stream 1, now
529 * send a message to stream 0. You have a situation where the TSN
530 * has wrapped but not in the stream. Is this worth worrying about
531 * or should we just change our queue sort at the bottom to be by
534 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
535 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
536 * assignment this could happen... and I don't see how this would be
537 * a violation. So for now I am undecided an will leave the sort by
538 * SSN alone. Maybe a hybred approach is the answer
541 struct sctp_stream_in *strm;
542 struct sctp_queued_to_read *at;
/* Charge the stream-queue accounting up front; undone on direct delivery. */
548 asoc->size_on_all_streams += control->length;
549 sctp_ucount_incr(asoc->cnt_on_all_streams);
550 strm = &asoc->strmin[control->sinfo_stream];
551 nxt_todel = strm->last_sequence_delivered + 1;
552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
553 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
555 SCTPDBG(SCTP_DEBUG_INDATA1,
556 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
557 (uint32_t) control->sinfo_stream,
558 (uint32_t) strm->last_sequence_delivered,
559 (uint32_t) nxt_todel);
/* SSN at or behind last delivered: duplicate/violation -> abort. */
560 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
561 /* The incoming sseq is behind where we last delivered? */
562 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
563 control->sinfo_ssn, strm->last_sequence_delivered);
566 * throw it in the stream so it gets cleaned up in
567 * association destruction
569 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Build an operational-error cause (param header + 3 words of detail:
 * abort location, TSN, stream<<16|ssn) to send with the ABORT. */
570 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
571 0, M_DONTWAIT, 1, MT_DATA);
573 struct sctp_paramhdr *ph;
576 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
577 (sizeof(uint32_t) * 3);
578 ph = mtod(oper, struct sctp_paramhdr *);
579 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
580 ph->param_length = htons(SCTP_BUF_LEN(oper));
581 ippp = (uint32_t *) (ph + 1);
582 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
584 *ippp = control->sinfo_tsn;
586 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
588 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
589 sctp_abort_an_association(stcb->sctp_ep, stcb,
590 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* In-order arrival: deliver straight to the socket buffer. */
596 if (nxt_todel == control->sinfo_ssn) {
597 /* can be delivered right away? */
598 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
599 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
601 /* EY it wont be queued if it could be delivered directly */
603 asoc->size_on_all_streams -= control->length;
604 sctp_ucount_decr(asoc->cnt_on_all_streams);
605 strm->last_sequence_delivered++;
607 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
608 sctp_add_to_readq(stcb->sctp_ep, stcb,
610 &stcb->sctp_socket->so_rcv, 1,
611 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* This delivery may have unblocked queued entries; drain them. */
612 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
614 nxt_todel = strm->last_sequence_delivered + 1;
615 if (nxt_todel == control->sinfo_ssn) {
616 TAILQ_REMOVE(&strm->inqueue, control, next);
617 asoc->size_on_all_streams -= control->length;
618 sctp_ucount_decr(asoc->cnt_on_all_streams);
619 strm->last_sequence_delivered++;
621 * We ignore the return of deliver_data here
622 * since we always can hold the chunk on the
623 * d-queue. And we have a finite number that
624 * can be delivered from the strq.
626 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
627 sctp_log_strm_del(control, NULL,
628 SCTP_STR_LOG_FROM_IMMED_DEL);
630 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
631 sctp_add_to_readq(stcb->sctp_ep, stcb,
633 &stcb->sctp_socket->so_rcv, 1,
634 SCTP_READ_LOCK_NOT_HELD,
643 * Ok, we did not deliver this guy, find the correct place
644 * to put it on the queue.
646 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
/* Out-of-order: insert into strm->inqueue sorted by ascending SSN. */
649 if (TAILQ_EMPTY(&strm->inqueue)) {
651 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
652 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
654 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
656 TAILQ_FOREACH(at, &strm->inqueue, next) {
657 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
659 * one in queue is bigger than the
660 * new one, insert before this one
662 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
663 sctp_log_strm_del(control, at,
664 SCTP_STR_LOG_FROM_INSERT_MD);
666 TAILQ_INSERT_BEFORE(at, control, next);
668 } else if (at->sinfo_ssn == control->sinfo_ssn) {
670 * Gak, He sent me a duplicate str
674 * foo bar, I guess I will just free
675 * this new guy, should we abort
676 * too? FIX ME MAYBE? Or it COULD be
677 * that the SSN's have wrapped.
678 * Maybe I should compare to TSN
679 * somehow... sigh for now just blow
/* Duplicate SSN already queued: drop the new entry and its refs. */
684 sctp_m_freem(control->data);
685 control->data = NULL;
686 asoc->size_on_all_streams -= control->length;
687 sctp_ucount_decr(asoc->cnt_on_all_streams);
688 if (control->whoFrom) {
689 sctp_free_remote_addr(control->whoFrom);
690 control->whoFrom = NULL;
692 sctp_free_a_readq(stcb, control);
695 if (TAILQ_NEXT(at, next) == NULL) {
697 * We are at the end, insert
700 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
701 sctp_log_strm_del(control, at,
702 SCTP_STR_LOG_FROM_INSERT_TL);
704 TAILQ_INSERT_AFTER(&strm->inqueue,
715 * Returns two things: You get the total size of the deliverable parts of the
716 * first fragmented message on the reassembly queue. And you get a 1 back if
717 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * sctp_is_all_msg_on_reasm: scan the head of reasmqueue, accumulating
 * into *t_size the sizes of the contiguous fragments of the first
 * message; result (1 complete / 0 incomplete) per the comment above.
 * NOTE(review): missing from this extract: *t_size initialization,
 * return statements, the per-iteration tsn advance, and closing
 * braces. Code kept byte-identical.
 */
720 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
722 struct sctp_tmit_chunk *chk;
726 chk = TAILQ_FIRST(&asoc->reasmqueue);
728 /* nothing on the queue */
731 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
732 /* Not a first on the queue */
735 tsn = chk->rec.data.TSN_seq;
/* Walk fragments while TSNs stay contiguous; LAST_FRAG = complete. */
736 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
737 if (tsn != chk->rec.data.TSN_seq) {
740 *t_size += chk->send_size;
741 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * sctp_deliver_reasm_check: decide whether to begin (or continue)
 * partial delivery from the reassembly queue. Starts pdapi when the
 * head is a deliverable FIRST fragment and either the whole message is
 * present or the queued size reaches the partial-delivery point; when
 * pdapi is already in progress it simply services the queue again.
 * NOTE(review): lines are missing from this extract (empty-queue
 * return, nxt_todel declaration/assignment, else/braces); code kept
 * byte-identical.
 */
750 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
752 struct sctp_tmit_chunk *chk;
754 uint32_t tsize, pd_point;
757 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset accounting (return line dropped from extract). */
760 asoc->size_on_reasm_queue = 0;
761 asoc->cnt_on_reasm_queue = 0;
764 if (asoc->fragmented_delivery_inprogress == 0) {
766 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
767 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
768 (nxt_todel == chk->rec.data.stream_seq ||
769 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
771 * Yep the first one is here and its ok to deliver
/* pd_point: min of socket rcv limit and the endpoint's configured
 * partial-delivery threshold (socket may be gone, hence the split). */
774 if (stcb->sctp_socket) {
775 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
776 stcb->sctp_ep->partial_delivery_point);
778 pd_point = stcb->sctp_ep->partial_delivery_point;
780 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
783 * Yes, we setup to start reception, by
784 * backing down the TSN just in case we
785 * can't deliver. If we
787 asoc->fragmented_delivery_inprogress = 1;
788 asoc->tsn_last_delivered =
789 chk->rec.data.TSN_seq - 1;
791 chk->rec.data.stream_number;
792 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
793 asoc->pdapi_ppid = chk->rec.data.payloadtype;
794 asoc->fragment_flags = chk->rec.data.rcv_flags;
795 sctp_service_reassembly(stcb, asoc);
800 * Service re-assembly will deliver stream data queued at
801 * the end of fragmented delivery.. but it wont know to go
802 * back and call itself again... we do that here with the
805 sctp_service_reassembly(stcb, asoc);
806 if (asoc->fragmented_delivery_inprogress == 0) {
808 * finished our Fragmented delivery, could be more
817 * Dump onto the re-assembly queue, in its proper place. After dumping on the
818 * queue, see if anthing can be delivered. If so pull it off (or as much as
819 * we can. If we run out of space then we must dump what we can and set the
820 * appropriate flag to say we queued what we could.
823 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
824 struct sctp_tmit_chunk *chk, int *abort_flag)
827 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
829 struct sctp_tmit_chunk *at, *prev, *next;
832 cum_ackp1 = asoc->tsn_last_delivered + 1;
833 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
834 /* This is the first one on the queue */
835 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
837 * we do not check for delivery of anything when only one
840 asoc->size_on_reasm_queue = chk->send_size;
841 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
842 if (chk->rec.data.TSN_seq == cum_ackp1) {
843 if (asoc->fragmented_delivery_inprogress == 0 &&
844 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
845 SCTP_DATA_FIRST_FRAG) {
847 * An empty queue, no delivery inprogress,
848 * we hit the next one and it does NOT have
849 * a FIRST fragment mark.
851 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
852 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
853 0, M_DONTWAIT, 1, MT_DATA);
856 struct sctp_paramhdr *ph;
860 sizeof(struct sctp_paramhdr) +
861 (sizeof(uint32_t) * 3);
862 ph = mtod(oper, struct sctp_paramhdr *);
864 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
865 ph->param_length = htons(SCTP_BUF_LEN(oper));
866 ippp = (uint32_t *) (ph + 1);
867 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
869 *ippp = chk->rec.data.TSN_seq;
871 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
874 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
875 sctp_abort_an_association(stcb->sctp_ep, stcb,
876 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
878 } else if (asoc->fragmented_delivery_inprogress &&
879 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
881 * We are doing a partial delivery and the
882 * NEXT chunk MUST be either the LAST or
883 * MIDDLE fragment NOT a FIRST
885 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
886 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
887 0, M_DONTWAIT, 1, MT_DATA);
889 struct sctp_paramhdr *ph;
893 sizeof(struct sctp_paramhdr) +
894 (3 * sizeof(uint32_t));
895 ph = mtod(oper, struct sctp_paramhdr *);
897 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
898 ph->param_length = htons(SCTP_BUF_LEN(oper));
899 ippp = (uint32_t *) (ph + 1);
900 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
902 *ippp = chk->rec.data.TSN_seq;
904 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
906 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
907 sctp_abort_an_association(stcb->sctp_ep, stcb,
908 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
910 } else if (asoc->fragmented_delivery_inprogress) {
912 * Here we are ok with a MIDDLE or LAST
915 if (chk->rec.data.stream_number !=
916 asoc->str_of_pdapi) {
917 /* Got to be the right STR No */
918 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
919 chk->rec.data.stream_number,
921 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
922 0, M_DONTWAIT, 1, MT_DATA);
924 struct sctp_paramhdr *ph;
928 sizeof(struct sctp_paramhdr) +
929 (sizeof(uint32_t) * 3);
931 struct sctp_paramhdr *);
933 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
935 htons(SCTP_BUF_LEN(oper));
936 ippp = (uint32_t *) (ph + 1);
937 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
939 *ippp = chk->rec.data.TSN_seq;
941 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
943 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
944 sctp_abort_an_association(stcb->sctp_ep,
945 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
947 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
948 SCTP_DATA_UNORDERED &&
949 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
950 /* Got to be the right STR Seq */
951 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
952 chk->rec.data.stream_seq,
954 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
955 0, M_DONTWAIT, 1, MT_DATA);
957 struct sctp_paramhdr *ph;
961 sizeof(struct sctp_paramhdr) +
962 (3 * sizeof(uint32_t));
964 struct sctp_paramhdr *);
966 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
968 htons(SCTP_BUF_LEN(oper));
969 ippp = (uint32_t *) (ph + 1);
970 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
972 *ippp = chk->rec.data.TSN_seq;
974 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
977 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
978 sctp_abort_an_association(stcb->sctp_ep,
979 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
987 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
988 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
990 * one in queue is bigger than the new one, insert
994 asoc->size_on_reasm_queue += chk->send_size;
995 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
997 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
999 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1000 /* Gak, He sent me a duplicate str seq number */
1002 * foo bar, I guess I will just free this new guy,
1003 * should we abort too? FIX ME MAYBE? Or it COULD be
1004 * that the SSN's have wrapped. Maybe I should
1005 * compare to TSN somehow... sigh for now just blow
1009 sctp_m_freem(chk->data);
1012 sctp_free_a_chunk(stcb, chk);
1015 last_flags = at->rec.data.rcv_flags;
1016 last_tsn = at->rec.data.TSN_seq;
1018 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1020 * We are at the end, insert it after this
1023 /* check it first */
1024 asoc->size_on_reasm_queue += chk->send_size;
1025 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1026 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1031 /* Now the audits */
1033 prev_tsn = chk->rec.data.TSN_seq - 1;
1034 if (prev_tsn == prev->rec.data.TSN_seq) {
1036 * Ok the one I am dropping onto the end is the
1037 * NEXT. A bit of valdiation here.
1039 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1040 SCTP_DATA_FIRST_FRAG ||
1041 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1042 SCTP_DATA_MIDDLE_FRAG) {
1044 * Insert chk MUST be a MIDDLE or LAST
1047 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1048 SCTP_DATA_FIRST_FRAG) {
1049 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1050 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1051 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1052 0, M_DONTWAIT, 1, MT_DATA);
1054 struct sctp_paramhdr *ph;
1057 SCTP_BUF_LEN(oper) =
1058 sizeof(struct sctp_paramhdr) +
1059 (3 * sizeof(uint32_t));
1061 struct sctp_paramhdr *);
1063 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1065 htons(SCTP_BUF_LEN(oper));
1066 ippp = (uint32_t *) (ph + 1);
1067 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1069 *ippp = chk->rec.data.TSN_seq;
1071 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1074 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1075 sctp_abort_an_association(stcb->sctp_ep,
1076 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1080 if (chk->rec.data.stream_number !=
1081 prev->rec.data.stream_number) {
1083 * Huh, need the correct STR here,
1084 * they must be the same.
1086 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1087 chk->rec.data.stream_number,
1088 prev->rec.data.stream_number);
1089 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1090 0, M_DONTWAIT, 1, MT_DATA);
1092 struct sctp_paramhdr *ph;
1095 SCTP_BUF_LEN(oper) =
1096 sizeof(struct sctp_paramhdr) +
1097 (3 * sizeof(uint32_t));
1099 struct sctp_paramhdr *);
1101 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1103 htons(SCTP_BUF_LEN(oper));
1104 ippp = (uint32_t *) (ph + 1);
1105 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1107 *ippp = chk->rec.data.TSN_seq;
1109 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1111 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1112 sctp_abort_an_association(stcb->sctp_ep,
1113 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1118 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1119 chk->rec.data.stream_seq !=
1120 prev->rec.data.stream_seq) {
1122 * Huh, need the correct STR here,
1123 * they must be the same.
1125 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1126 chk->rec.data.stream_seq,
1127 prev->rec.data.stream_seq);
1128 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1129 0, M_DONTWAIT, 1, MT_DATA);
1131 struct sctp_paramhdr *ph;
1134 SCTP_BUF_LEN(oper) =
1135 sizeof(struct sctp_paramhdr) +
1136 (3 * sizeof(uint32_t));
1138 struct sctp_paramhdr *);
1140 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1142 htons(SCTP_BUF_LEN(oper));
1143 ippp = (uint32_t *) (ph + 1);
1144 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1146 *ippp = chk->rec.data.TSN_seq;
1148 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1150 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1151 sctp_abort_an_association(stcb->sctp_ep,
1152 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1157 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1158 SCTP_DATA_LAST_FRAG) {
1159 /* Insert chk MUST be a FIRST */
1160 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1161 SCTP_DATA_FIRST_FRAG) {
1162 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1163 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1164 0, M_DONTWAIT, 1, MT_DATA);
1166 struct sctp_paramhdr *ph;
1169 SCTP_BUF_LEN(oper) =
1170 sizeof(struct sctp_paramhdr) +
1171 (3 * sizeof(uint32_t));
1173 struct sctp_paramhdr *);
1175 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1177 htons(SCTP_BUF_LEN(oper));
1178 ippp = (uint32_t *) (ph + 1);
1179 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1181 *ippp = chk->rec.data.TSN_seq;
1183 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1186 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1187 sctp_abort_an_association(stcb->sctp_ep,
1188 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1197 post_tsn = chk->rec.data.TSN_seq + 1;
1198 if (post_tsn == next->rec.data.TSN_seq) {
1200 * Ok the one I am inserting ahead of is my NEXT
1201 * one. A bit of valdiation here.
1203 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1204 /* Insert chk MUST be a last fragment */
1205 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1206 != SCTP_DATA_LAST_FRAG) {
1207 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1208 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1209 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1210 0, M_DONTWAIT, 1, MT_DATA);
1212 struct sctp_paramhdr *ph;
1215 SCTP_BUF_LEN(oper) =
1216 sizeof(struct sctp_paramhdr) +
1217 (3 * sizeof(uint32_t));
1219 struct sctp_paramhdr *);
1221 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1223 htons(SCTP_BUF_LEN(oper));
1224 ippp = (uint32_t *) (ph + 1);
1225 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1227 *ippp = chk->rec.data.TSN_seq;
1229 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1231 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1232 sctp_abort_an_association(stcb->sctp_ep,
1233 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1238 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1239 SCTP_DATA_MIDDLE_FRAG ||
1240 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1241 SCTP_DATA_LAST_FRAG) {
1243 * Insert chk CAN be MIDDLE or FIRST NOT
1246 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1247 SCTP_DATA_LAST_FRAG) {
1248 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1249 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1250 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1251 0, M_DONTWAIT, 1, MT_DATA);
1253 struct sctp_paramhdr *ph;
1256 SCTP_BUF_LEN(oper) =
1257 sizeof(struct sctp_paramhdr) +
1258 (3 * sizeof(uint32_t));
1260 struct sctp_paramhdr *);
1262 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1264 htons(SCTP_BUF_LEN(oper));
1265 ippp = (uint32_t *) (ph + 1);
1266 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1268 *ippp = chk->rec.data.TSN_seq;
1270 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1273 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1274 sctp_abort_an_association(stcb->sctp_ep,
1275 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1280 if (chk->rec.data.stream_number !=
1281 next->rec.data.stream_number) {
1283 * Huh, need the correct STR here,
1284 * they must be the same.
1286 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1287 chk->rec.data.stream_number,
1288 next->rec.data.stream_number);
1289 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1290 0, M_DONTWAIT, 1, MT_DATA);
1292 struct sctp_paramhdr *ph;
1295 SCTP_BUF_LEN(oper) =
1296 sizeof(struct sctp_paramhdr) +
1297 (3 * sizeof(uint32_t));
1299 struct sctp_paramhdr *);
1301 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1303 htons(SCTP_BUF_LEN(oper));
1304 ippp = (uint32_t *) (ph + 1);
1305 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1307 *ippp = chk->rec.data.TSN_seq;
1309 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1312 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1313 sctp_abort_an_association(stcb->sctp_ep,
1314 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1319 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1320 chk->rec.data.stream_seq !=
1321 next->rec.data.stream_seq) {
1323 * Huh, need the correct STR here,
1324 * they must be the same.
1326 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1327 chk->rec.data.stream_seq,
1328 next->rec.data.stream_seq);
1329 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1330 0, M_DONTWAIT, 1, MT_DATA);
1332 struct sctp_paramhdr *ph;
1335 SCTP_BUF_LEN(oper) =
1336 sizeof(struct sctp_paramhdr) +
1337 (3 * sizeof(uint32_t));
1339 struct sctp_paramhdr *);
1341 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1343 htons(SCTP_BUF_LEN(oper));
1344 ippp = (uint32_t *) (ph + 1);
1345 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1347 *ippp = chk->rec.data.TSN_seq;
1349 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1351 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1352 sctp_abort_an_association(stcb->sctp_ep,
1353 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1361 /* Do we need to do some delivery? check */
1362 sctp_deliver_reasm_check(stcb, asoc);
1366 * This is an unfortunate routine. It checks to make sure an evil guy is not
1367 * stuffing us full of bad packet fragments. A broken peer could also do this,
1368 * but that is doubtful. It is too bad that I must worry about evil crackers. Sigh.
/*
 * Decide whether a chunk carrying TSN_seq would have to live on the
 * reassembly queue: walk the queue and check whether TSN_seq directly
 * abuts an existing fragment (one above a non-LAST fragment, or one
 * below a non-FIRST fragment) or duplicates an entry outright.  Called
 * from sctp_process_a_data_chunk() as a sanity check against
 * evil/broken peers.
 * NOTE(review): this listing has elided lines (the function's return
 * type, its second parameter, the return statements and several braces
 * are missing); comments describe only the code that is visible.
 */
1372 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1375 	struct sctp_tmit_chunk *at;
/* Scan every fragment currently queued for reassembly. */
1378 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1379 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1380 			/* is it one bigger? */
1381 			tsn_est = at->rec.data.TSN_seq + 1;
1382 			if (tsn_est == TSN_seq) {
1383 				/* yep. It better be a last then */
1384 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1385 				    SCTP_DATA_LAST_FRAG) {
1387 					 * Ok this guy belongs next to a guy
1388 					 * that is NOT last, it should be a
1389 					 * middle/last, not a complete
1395 					 * This guy is ok since its a LAST
1396 					 * and the new chunk is a fully
1397 					 * self- contained one.
1402 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1403 			/* Software error since I have a dup? */
1407 			 * Ok, 'at' is larger than new chunk but does it
1408 			 * need to be right before it.
1410 			tsn_est = TSN_seq + 1;
1411 			if (tsn_est == at->rec.data.TSN_seq) {
1412 				/* Yep, It better be a first */
1413 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1414 				    SCTP_DATA_FIRST_FRAG) {
/*
 * Process one inbound DATA chunk for this association: validate its TSN
 * against the (nr_)mapping arrays, run duplicate / rwnd / stream-id /
 * stream-sequence sanity checks (aborting the association on protocol
 * violations), and then either deliver the payload straight to the
 * socket buffer (express delivery / PD-API append) or queue it for
 * reassembly or ordered per-stream delivery.
 * NOTE(review): this listing has many elided lines (gaps in the
 * embedded line numbers - missing braces, `if (oper)` guards, labels,
 * returns); comments below describe only the code that is visible.
 */
1427 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1428 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1429 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1430 int *break_flag, int last_chunk)
1432 /* Process a data chunk */
1433 /* struct sctp_tmit_chunk *chk; */
1434 struct sctp_tmit_chunk *chk;
1438 int need_reasm_check = 0;
1439 uint16_t strmno, strmseq;
1441 struct sctp_queued_to_read *control;
1443 uint32_t protocol_id;
1444 uint8_t chunk_flags;
1445 struct sctp_stream_reset_list *liste;
/* Extract TSN and flags from the (still network-order) chunk header. */
1448 tsn = ntohl(ch->dp.tsn);
1449 chunk_flags = ch->ch.chunk_flags;
1450 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1451 asoc->send_sack = 1;
1453 protocol_id = ch->dp.protocol_id;
1454 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1456 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1461 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* TSN at or below the cum-ack: duplicate; remember it for the next SACK. */
1462 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1463 /* It is a duplicate */
1464 SCTP_STAT_INCR(sctps_recvdupdata);
1465 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1466 /* Record a dup for the next outbound sack */
1467 asoc->dup_tsns[asoc->numduptsns] = tsn;
1470 asoc->send_sack = 1;
1473 /* Calculate the number of TSN's between the base and this TSN */
1474 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1475 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1476 /* Can't hold the bit in the mapping at max array, toss it */
1479 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1480 SCTP_TCB_LOCK_ASSERT(stcb);
1481 if (sctp_expand_mapping_array(asoc, gap)) {
1482 /* Can't expand, drop it */
1486 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1489 /* See if we have received this one already */
1490 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1491 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1492 SCTP_STAT_INCR(sctps_recvdupdata);
1493 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1494 /* Record a dup for the next outbound sack */
1495 asoc->dup_tsns[asoc->numduptsns] = tsn;
1498 asoc->send_sack = 1;
1502 * Check to see about the GONE flag, duplicates would cause a sack
1503 * to be sent up above
1505 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1506 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1507 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1510 * wait a minute, this guy is gone, there is no longer a
1511 * receiver. Send peer an ABORT!
1513 struct mbuf *op_err;
1515 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1516 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1521 * Now before going further we see if there is room. If NOT then we
1522 * MAY let one through only IF this TSN is the one we are waiting
1523 * for on a partial delivery API.
1526 /* now do the tests */
1527 if (((asoc->cnt_on_all_streams +
1528 asoc->cnt_on_reasm_queue +
1529 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1530 (((int)asoc->my_rwnd) <= 0)) {
1532 * When we have NO room in the rwnd we check to make sure
1533 * the reader is doing its job...
1535 if (stcb->sctp_socket->so_rcv.sb_cc) {
1536 /* some to read, wake-up */
1537 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Lock-order dance: drop the TCB lock before taking the socket lock,
 * holding a refcount so the assoc cannot vanish meanwhile. */
1540 so = SCTP_INP_SO(stcb->sctp_ep);
1541 atomic_add_int(&stcb->asoc.refcnt, 1);
1542 SCTP_TCB_UNLOCK(stcb);
1543 SCTP_SOCKET_LOCK(so, 1);
1544 SCTP_TCB_LOCK(stcb);
1545 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1546 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1547 /* assoc was freed while we were unlocked */
1548 SCTP_SOCKET_UNLOCK(so, 1);
1552 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1553 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1554 SCTP_SOCKET_UNLOCK(so, 1);
1557 /* now is it in the mapping array of what we have accepted? */
1558 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1559 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1560 /* Nope not in the valid range dump it */
1561 sctp_set_rwnd(stcb, asoc);
1562 if ((asoc->cnt_on_all_streams +
1563 asoc->cnt_on_reasm_queue +
1564 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1565 SCTP_STAT_INCR(sctps_datadropchklmt);
1567 SCTP_STAT_INCR(sctps_datadroprwnd);
/* Validate the inbound stream id against the negotiated stream count;
 * out-of-range ids get an INVALID_STREAM error cause queued to the peer. */
1574 strmno = ntohs(ch->dp.stream_id);
1575 if (strmno >= asoc->streamincnt) {
1576 struct sctp_paramhdr *phdr;
1579 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1580 0, M_DONTWAIT, 1, MT_DATA);
1582 /* add some space up front so prepend will work well */
1583 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1584 phdr = mtod(mb, struct sctp_paramhdr *);
1586 * Error causes are just param's and this one has
1587 * two back to back phdr, one with the error type
1588 * and size, the other with the streamid and a rsvd
1590 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1591 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1592 phdr->param_length =
1593 htons(sizeof(struct sctp_paramhdr) * 2);
1595 /* We insert the stream in the type field */
1596 phdr->param_type = ch->dp.stream_id;
1597 /* And set the length to 0 for the rsvd field */
1598 phdr->param_length = 0;
1599 sctp_queue_op_err(stcb, mb);
/* Bad stream id: count it but still mark the TSN received in the nr map
 * (and advance cum-ack if in order) so the peer stops retransmitting.
 * NOTE(review): the actual discard of this chunk appears to be in lines
 * elided from this listing - confirm against the full source. */
1601 SCTP_STAT_INCR(sctps_badsid);
1602 SCTP_TCB_LOCK_ASSERT(stcb);
1603 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1604 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1605 asoc->highest_tsn_inside_nr_map = tsn;
1607 if (tsn == (asoc->cumulative_tsn + 1)) {
1608 /* Update cum-ack */
1609 asoc->cumulative_tsn = tsn;
1614 * Before we continue lets validate that we are not being fooled by
1615 * an evil attacker. We can only have 4k chunks based on our TSN
1616 * spread allowed by the mapping array 512 * 8 bits, so there is no
1617 * way our stream sequence numbers could have wrapped. We of course
1618 * only validate the FIRST fragment so the bit must be set.
1620 strmseq = ntohs(ch->dp.stream_sequence);
1621 #ifdef SCTP_ASOCLOG_OF_TSNS
1622 SCTP_TCB_LOCK_ASSERT(stcb);
1623 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1624 asoc->tsn_in_at = 0;
1625 asoc->tsn_in_wrapped = 1;
1627 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1628 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1629 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1630 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1631 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1632 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1633 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1634 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
/* An ordered FIRST fragment whose SSN is at or behind what we already
 * delivered (and no stream reset pending) is a protocol violation: abort. */
1637 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1638 (TAILQ_EMPTY(&asoc->resetHead)) &&
1639 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1640 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1641 /* The incoming sseq is behind where we last delivered? */
1642 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1643 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1644 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1645 0, M_DONTWAIT, 1, MT_DATA);
1647 struct sctp_paramhdr *ph;
1650 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1651 (3 * sizeof(uint32_t));
1652 ph = mtod(oper, struct sctp_paramhdr *);
1653 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1654 ph->param_length = htons(SCTP_BUF_LEN(oper));
1655 ippp = (uint32_t *) (ph + 1);
1656 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1660 *ippp = ((strmno << 16) | strmseq);
1663 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1664 sctp_abort_an_association(stcb->sctp_ep, stcb,
1665 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1669 /************************************
1670 * From here down we may find ch-> invalid
1671 * so its a good idea NOT to use it.
1672 *************************************/
/* Payload length excludes the DATA chunk header. Either copy the payload
 * out of the packet mbuf chain, or (for the last chunk) steal it in place. */
1674 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1675 if (last_chunk == 0) {
1676 dmbuf = SCTP_M_COPYM(*m,
1677 (offset + sizeof(struct sctp_data_chunk)),
1678 the_len, M_DONTWAIT);
1679 #ifdef SCTP_MBUF_LOGGING
1680 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1685 if (SCTP_BUF_IS_EXTENDED(mat)) {
1686 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1688 mat = SCTP_BUF_NEXT(mat);
1693 /* We can steal the last chunk */
1697 /* lop off the top part */
1698 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1699 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1700 l_len = SCTP_BUF_LEN(dmbuf);
1703 * need to count up the size hopefully does not hit
1711 l_len += SCTP_BUF_LEN(lat);
1712 lat = SCTP_BUF_NEXT(lat);
1715 if (l_len > the_len) {
1716 /* Trim the end round bytes off too */
1717 m_adj(dmbuf, -(l_len - the_len));
1720 if (dmbuf == NULL) {
1721 SCTP_STAT_INCR(sctps_nomem);
1724 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1725 asoc->fragmented_delivery_inprogress == 0 &&
1726 TAILQ_EMPTY(&asoc->resetHead) &&
1728 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1729 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1730 /* Candidate for express delivery */
1732 * Its not fragmented, No PD-API is up, Nothing in the
1733 * delivery queue, Its un-ordered OR ordered and the next to
1734 * deliver AND nothing else is stuck on the stream queue,
1735 * And there is room for it in the socket buffer. Lets just
1736 * stuff it up the buffer....
1739 /* It would be nice to avoid this copy if we could :< */
1740 sctp_alloc_a_readq(stcb, control);
1741 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1747 if (control == NULL) {
1748 goto failed_express_del;
1750 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1751 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1752 asoc->highest_tsn_inside_nr_map = tsn;
1754 sctp_add_to_readq(stcb->sctp_ep, stcb,
1755 control, &stcb->sctp_socket->so_rcv,
1756 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1758 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1759 /* for ordered, bump what we delivered */
1760 asoc->strmin[strmno].last_sequence_delivered++;
1762 SCTP_STAT_INCR(sctps_recvexpress);
1763 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1764 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1765 SCTP_STR_LOG_FROM_EXPRS_DEL);
1769 goto finish_express_del;
1772 /* If we reach here this is a new chunk */
1775 /* Express for fragmented delivery? */
/* If a partial-delivery read is in flight for this exact stream/SSN and
 * this fragment is the next TSN, append it directly to the live readq entry. */
1776 if ((asoc->fragmented_delivery_inprogress) &&
1777 (stcb->asoc.control_pdapi) &&
1778 (asoc->str_of_pdapi == strmno) &&
1779 (asoc->ssn_of_pdapi == strmseq)
1781 control = stcb->asoc.control_pdapi;
1782 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1783 /* Can't be another first? */
1784 goto failed_pdapi_express_del;
1786 if (tsn == (control->sinfo_tsn + 1)) {
1787 /* Yep, we can add it on */
1791 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1794 cumack = asoc->cumulative_tsn;
1795 if ((cumack + 1) == tsn)
1798 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1800 &stcb->sctp_socket->so_rcv)) {
1801 SCTP_PRINTF("Append fails end:%d\n", end);
1802 goto failed_pdapi_express_del;
1804 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1805 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1806 asoc->highest_tsn_inside_nr_map = tsn;
1808 SCTP_STAT_INCR(sctps_recvexpressm);
1809 control->sinfo_tsn = tsn;
1810 asoc->tsn_last_delivered = tsn;
1811 asoc->fragment_flags = chunk_flags;
1812 asoc->tsn_of_pdapi_last_delivered = tsn;
1813 asoc->last_flags_delivered = chunk_flags;
1814 asoc->last_strm_seq_delivered = strmseq;
1815 asoc->last_strm_no_delivered = strmno;
1817 /* clean up the flags and such */
1818 asoc->fragmented_delivery_inprogress = 0;
1819 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1820 asoc->strmin[strmno].last_sequence_delivered++;
1822 stcb->asoc.control_pdapi = NULL;
1823 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1825 * There could be another message
1828 need_reasm_check = 1;
1832 goto finish_express_del;
1835 failed_pdapi_express_del:
/* Slow path: record the TSN in the appropriate mapping array (nr map when
 * draining is disabled, regular map otherwise). */
1837 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1838 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1839 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1840 asoc->highest_tsn_inside_nr_map = tsn;
1843 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1844 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1845 asoc->highest_tsn_inside_map = tsn;
/* Fragmented message: wrap the payload in a tmit_chunk headed for the
 * reassembly queue below. */
1848 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1849 sctp_alloc_a_chunk(stcb, chk);
1851 /* No memory so we drop the chunk */
1852 SCTP_STAT_INCR(sctps_nomem);
1853 if (last_chunk == 0) {
1854 /* we copied it, free the copy */
1855 sctp_m_freem(dmbuf);
1859 chk->rec.data.TSN_seq = tsn;
1860 chk->no_fr_allowed = 0;
1861 chk->rec.data.stream_seq = strmseq;
1862 chk->rec.data.stream_number = strmno;
1863 chk->rec.data.payloadtype = protocol_id;
1864 chk->rec.data.context = stcb->asoc.context;
1865 chk->rec.data.doing_fast_retransmit = 0;
1866 chk->rec.data.rcv_flags = chunk_flags;
1868 chk->send_size = the_len;
1870 atomic_add_int(&net->ref_count, 1);
/* NOTE(review): the branch structure between the fragmented (tmit_chunk)
 * and complete-message (readq entry) paths is partially elided here -
 * confirm against the full source. */
1873 sctp_alloc_a_readq(stcb, control);
1874 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1880 if (control == NULL) {
1881 /* No memory so we drop the chunk */
1882 SCTP_STAT_INCR(sctps_nomem);
1883 if (last_chunk == 0) {
1884 /* we copied it, free the copy */
1885 sctp_m_freem(dmbuf);
1889 control->length = the_len;
1892 /* Mark it as received */
1893 /* Now queue it where it belongs */
1894 if (control != NULL) {
1895 /* First a sanity check */
1896 if (asoc->fragmented_delivery_inprogress) {
1898 * Ok, we have a fragmented delivery in progress if
1899 * this chunk is next to deliver OR belongs in our
1900 * view to the reassembly, the peer is evil or
1903 uint32_t estimate_tsn;
1905 estimate_tsn = asoc->tsn_last_delivered + 1;
1906 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1907 (estimate_tsn == control->sinfo_tsn)) {
1908 /* Evil/Broke peer */
1909 sctp_m_freem(control->data);
1910 control->data = NULL;
1911 if (control->whoFrom) {
1912 sctp_free_remote_addr(control->whoFrom);
1913 control->whoFrom = NULL;
1915 sctp_free_a_readq(stcb, control);
1916 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1917 0, M_DONTWAIT, 1, MT_DATA);
1919 struct sctp_paramhdr *ph;
1922 SCTP_BUF_LEN(oper) =
1923 sizeof(struct sctp_paramhdr) +
1924 (3 * sizeof(uint32_t));
1925 ph = mtod(oper, struct sctp_paramhdr *);
1927 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1928 ph->param_length = htons(SCTP_BUF_LEN(oper));
1929 ippp = (uint32_t *) (ph + 1);
1930 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1934 *ippp = ((strmno << 16) | strmseq);
1936 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1937 sctp_abort_an_association(stcb->sctp_ep, stcb,
1938 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1943 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1944 sctp_m_freem(control->data);
1945 control->data = NULL;
1946 if (control->whoFrom) {
1947 sctp_free_remote_addr(control->whoFrom);
1948 control->whoFrom = NULL;
1950 sctp_free_a_readq(stcb, control);
1952 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1953 0, M_DONTWAIT, 1, MT_DATA);
1955 struct sctp_paramhdr *ph;
1958 SCTP_BUF_LEN(oper) =
1959 sizeof(struct sctp_paramhdr) +
1960 (3 * sizeof(uint32_t));
1962 struct sctp_paramhdr *);
1964 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1966 htons(SCTP_BUF_LEN(oper));
1967 ippp = (uint32_t *) (ph + 1);
1968 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1972 *ippp = ((strmno << 16) | strmseq);
1974 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1975 sctp_abort_an_association(stcb->sctp_ep,
1976 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1983 /* No PDAPI running */
1984 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1986 * Reassembly queue is NOT empty validate
1987 * that this tsn does not need to be in
1988 * reassembly queue. If it does then our peer
1989 * is broken or evil.
1991 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1992 sctp_m_freem(control->data);
1993 control->data = NULL;
1994 if (control->whoFrom) {
1995 sctp_free_remote_addr(control->whoFrom);
1996 control->whoFrom = NULL;
1998 sctp_free_a_readq(stcb, control);
1999 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2000 0, M_DONTWAIT, 1, MT_DATA);
2002 struct sctp_paramhdr *ph;
2005 SCTP_BUF_LEN(oper) =
2006 sizeof(struct sctp_paramhdr) +
2007 (3 * sizeof(uint32_t));
2009 struct sctp_paramhdr *);
2011 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2013 htons(SCTP_BUF_LEN(oper));
2014 ippp = (uint32_t *) (ph + 1);
2015 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2019 *ippp = ((strmno << 16) | strmseq);
2021 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2022 sctp_abort_an_association(stcb->sctp_ep,
2023 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2030 /* ok, if we reach here we have passed the sanity checks */
2031 if (chunk_flags & SCTP_DATA_UNORDERED) {
2032 /* queue directly into socket buffer */
2033 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2034 sctp_add_to_readq(stcb->sctp_ep, stcb,
2036 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2039 * Special check for when streams are resetting. We
2040 * could be more smart about this and check the
2041 * actual stream to see if it is not being reset..
2042 * that way we would not create a HOLB when amongst
2043 * streams being reset and those not being reset.
2045 * We take complete messages that have a stream reset
2046 * intervening (aka the TSN is after where our
2047 * cum-ack needs to be) off and put them on a
2048 * pending_reply_queue. The reassembly ones we do
2049 * not have to worry about since they are all sorted
2050 * and processed by TSN order. It is only the
2051 * singletons I must worry about.
2053 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2054 SCTP_TSN_GT(tsn, liste->tsn)) {
2056 * yep its past where we need to reset... go
2057 * ahead and queue it.
2059 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2061 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2063 struct sctp_queued_to_read *ctlOn,
2065 unsigned char inserted = 0;
/* Keep pending_reply_queue sorted by sinfo_tsn: insert before the first
 * entry with a larger TSN, else append at the tail. */
2067 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2068 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2072 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2077 if (inserted == 0) {
2079 * must be put at end, use
2080 * prevP (all setup from
2081 * loop) to setup nextP.
2083 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2087 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2094 /* Into the re-assembly queue */
2095 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2098 * the assoc is now gone and chk was put onto the
2099 * reasm queue, which has all been freed.
/* Presumably below the finish_express_del label (elided in this listing):
 * advance the cum-ack when this TSN is next in sequence and bump counters. */
2106 if (tsn == (asoc->cumulative_tsn + 1)) {
2107 /* Update cum-ack */
2108 asoc->cumulative_tsn = tsn;
2114 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2116 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2118 SCTP_STAT_INCR(sctps_recvdata);
2119 /* Set it present please */
2120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2121 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2123 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2124 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2125 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2127 /* check the special flag for stream resets */
2128 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2129 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2131 * we have finished working through the backlogged TSN's now
2132 * time to reset streams. 1: call reset function. 2: free
2133 * pending_reply space 3: distribute any chunks in
2134 * pending_reply_queue.
2136 struct sctp_queued_to_read *ctl, *nctl;
2138 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2139 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2140 SCTP_FREE(liste, SCTP_M_STRESET);
2141 /* sa_ignore FREED_MEMORY */
2142 liste = TAILQ_FIRST(&asoc->resetHead);
2143 if (TAILQ_EMPTY(&asoc->resetHead)) {
2144 /* All can be removed */
2145 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2146 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2147 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2153 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2154 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2158 * if ctl->sinfo_tsn is <= liste->tsn we can
2159 * process it which is the NOT of
2160 * ctl->sinfo_tsn > liste->tsn
2162 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2163 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2170 * Now service re-assembly to pick up anything that has been
2171 * held on reassembly queue?
2173 sctp_deliver_reasm_check(stcb, asoc);
2174 need_reasm_check = 0;
2176 if (need_reasm_check) {
2177 /* Another one waits ? */
2178 sctp_deliver_reasm_check(stcb, asoc);
/*
 * Lookup table: sctp_map_lookup_tab[v] is the bit index (0..7) of the
 * lowest clear (zero) bit in the byte value v, with table[0xff] == 8
 * (no clear bit).  E.g. v=0b00000011 -> 2, v=0b00000010 -> 0.
 * Used by sctp_slide_mapping_arrays() to locate the first missing TSN
 * inside a partially-filled mapping-array byte.
 */
2183 int8_t sctp_map_lookup_tab[256] = {
2184 0, 1, 0, 2, 0, 1, 0, 3,
2185 0, 1, 0, 2, 0, 1, 0, 4,
2186 0, 1, 0, 2, 0, 1, 0, 3,
2187 0, 1, 0, 2, 0, 1, 0, 5,
2188 0, 1, 0, 2, 0, 1, 0, 3,
2189 0, 1, 0, 2, 0, 1, 0, 4,
2190 0, 1, 0, 2, 0, 1, 0, 3,
2191 0, 1, 0, 2, 0, 1, 0, 6,
2192 0, 1, 0, 2, 0, 1, 0, 3,
2193 0, 1, 0, 2, 0, 1, 0, 4,
2194 0, 1, 0, 2, 0, 1, 0, 3,
2195 0, 1, 0, 2, 0, 1, 0, 5,
2196 0, 1, 0, 2, 0, 1, 0, 3,
2197 0, 1, 0, 2, 0, 1, 0, 4,
2198 0, 1, 0, 2, 0, 1, 0, 3,
2199 0, 1, 0, 2, 0, 1, 0, 7,
2200 0, 1, 0, 2, 0, 1, 0, 3,
2201 0, 1, 0, 2, 0, 1, 0, 4,
2202 0, 1, 0, 2, 0, 1, 0, 3,
2203 0, 1, 0, 2, 0, 1, 0, 5,
2204 0, 1, 0, 2, 0, 1, 0, 3,
2205 0, 1, 0, 2, 0, 1, 0, 4,
2206 0, 1, 0, 2, 0, 1, 0, 3,
2207 0, 1, 0, 2, 0, 1, 0, 6,
2208 0, 1, 0, 2, 0, 1, 0, 3,
2209 0, 1, 0, 2, 0, 1, 0, 4,
2210 0, 1, 0, 2, 0, 1, 0, 3,
2211 0, 1, 0, 2, 0, 1, 0, 5,
2212 0, 1, 0, 2, 0, 1, 0, 3,
2213 0, 1, 0, 2, 0, 1, 0, 4,
2214 0, 1, 0, 2, 0, 1, 0, 3,
2215 0, 1, 0, 2, 0, 1, 0, 8
2220 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2223 * Now we also need to check the mapping array in a couple of ways.
2224 * 1) Did we move the cum-ack point?
2226 * When you first glance at this you might think that all entries that
2227 * make up the postion of the cum-ack would be in the nr-mapping
2228 * array only.. i.e. things up to the cum-ack are always
2229 * deliverable. Thats true with one exception, when its a fragmented
2230 * message we may not deliver the data until some threshold (or all
2231 * of it) is in place. So we must OR the nr_mapping_array and
2232 * mapping_array to get a true picture of the cum-ack.
2234 struct sctp_association *asoc;
2237 int slide_from, slide_end, lgap, distance;
2238 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2243 old_cumack = asoc->cumulative_tsn;
2244 old_base = asoc->mapping_array_base_tsn;
2245 old_highest = asoc->highest_tsn_inside_map;
2247 * We could probably improve this a small bit by calculating the
2248 * offset of the current cum-ack as the starting point.
2251 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2252 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2256 /* there is a 0 bit */
2257 at += sctp_map_lookup_tab[val];
2261 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2263 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2264 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2266 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2267 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2269 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2270 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2271 sctp_print_mapping_array(asoc);
2272 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2273 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2275 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2276 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2279 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2280 highest_tsn = asoc->highest_tsn_inside_nr_map;
2282 highest_tsn = asoc->highest_tsn_inside_map;
2284 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2285 /* The complete array was completed by a single FR */
2286 /* highest becomes the cum-ack */
2294 /* clear the array */
2295 clr = ((at + 7) >> 3);
2296 if (clr > asoc->mapping_array_size) {
2297 clr = asoc->mapping_array_size;
2299 memset(asoc->mapping_array, 0, clr);
2300 memset(asoc->nr_mapping_array, 0, clr);
2302 for (i = 0; i < asoc->mapping_array_size; i++) {
2303 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2304 printf("Error Mapping array's not clean at clear\n");
2305 sctp_print_mapping_array(asoc);
2309 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2310 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2311 } else if (at >= 8) {
2312 /* we can slide the mapping array down */
2313 /* slide_from holds where we hit the first NON 0xff byte */
2316 * now calculate the ceiling of the move using our highest
2319 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2320 slide_end = (lgap >> 3);
2321 if (slide_end < slide_from) {
2322 sctp_print_mapping_array(asoc);
2324 panic("impossible slide");
2326 printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2327 lgap, slide_end, slide_from, at);
2331 if (slide_end > asoc->mapping_array_size) {
2333 panic("would overrun buffer");
2335 printf("Gak, would have overrun map end:%d slide_end:%d\n",
2336 asoc->mapping_array_size, slide_end);
2337 slide_end = asoc->mapping_array_size;
2340 distance = (slide_end - slide_from) + 1;
2341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2342 sctp_log_map(old_base, old_cumack, old_highest,
2343 SCTP_MAP_PREPARE_SLIDE);
2344 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2345 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2347 if (distance + slide_from > asoc->mapping_array_size ||
2350 * Here we do NOT slide forward the array so that
2351 * hopefully when more data comes in to fill it up
2352 * we will be able to slide it forward. Really I
2353 * don't think this should happen :-0
2356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2357 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2358 (uint32_t) asoc->mapping_array_size,
2359 SCTP_MAP_SLIDE_NONE);
2364 for (ii = 0; ii < distance; ii++) {
2365 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2366 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2369 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2370 asoc->mapping_array[ii] = 0;
2371 asoc->nr_mapping_array[ii] = 0;
2373 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2374 asoc->highest_tsn_inside_map += (slide_from << 3);
2376 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2377 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2379 asoc->mapping_array_base_tsn += (slide_from << 3);
2380 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2381 sctp_log_map(asoc->mapping_array_base_tsn,
2382 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2383 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide, after inbound DATA has been processed, whether to emit a SACK
 * immediately or (re)start the delayed-SACK timer.
 *
 * stcb       - the association's control block (holds asoc state and timers).
 * was_a_gap  - nonzero if a gap existed in the mapping array before this
 *              packet was processed.
 * abort_flag - out-flag used by callers to detect an aborted association.
 *
 * NOTE(review): this excerpt is missing intermediate lines; comments below
 * describe only what the visible statements establish.
 */
2391 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2393 struct sctp_association *asoc;
2394 uint32_t highest_tsn;
	/*
	 * The effective highest received TSN is the larger of the renegable
	 * (mapping_array) and non-renegable (nr_mapping_array) trackers.
	 */
2397 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2398 highest_tsn = asoc->highest_tsn_inside_nr_map;
2400 highest_tsn = asoc->highest_tsn_inside_map;
2404 * Now we need to see if we need to queue a sack or just start the
2405 * timer (if allowed).
2407 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2409 * Ok special case, in SHUTDOWN-SENT case. here we make
2410 * sure SACK timer is off and instead send a SHUTDOWN and a
	/* Stop any pending delayed-ack timer before the immediate send. */
2413 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2414 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2415 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
	/* In SHUTDOWN-SENT, re-send the SHUTDOWN and SACK right away. */
2417 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2418 sctp_send_sack(stcb);
2422 /* is there a gap now ? */
2423 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2426 * CMT DAC algorithm: increase number of packets received
2429 stcb->asoc.cmt_dac_pkts_rcvd++;
	/*
	 * Any one of the conditions below forces an immediate SACK (or, under
	 * CMT DAC, may instead allow the ack to be delayed — see inner test).
	 */
2431 if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2433 ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2435 (stcb->asoc.numduptsns) ||	/* we have dup's */
2436 (is_a_gap) ||	/* is still a gap */
2437 (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2438 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
	/*
	 * CMT DAC: if nothing *requires* an immediate ack (no forced send,
	 * no dups, delayed acks enabled, no timer already pending), the ack
	 * caused only by gap reports may be delayed.
	 */
2441 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2442 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2443 (stcb->asoc.send_sack == 0) &&
2444 (stcb->asoc.numduptsns == 0) &&
2445 (stcb->asoc.delayed_ack) &&
2446 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2449 * CMT DAC algorithm: With CMT, delay acks
2450 * even in the face of
2452 * reordering. Therefore, if acks that do not
2453 * have to be sent because of the above
2454 * reasons, will be delayed. That is, acks
2455 * that would have been sent due to gap
2456 * reports will be delayed with DAC. Start
2457 * the delayed ack timer.
2459 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2460 stcb->sctp_ep, stcb, NULL);
2463 * Ok we must build a SACK since the timer
2464 * is pending, we got our first packet OR
2465 * there are gaps or duplicates.
	/* Cancel the delayed-ack timer and send the SACK now. */
2467 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2468 sctp_send_sack(stcb);
	/* Otherwise just arm the delayed-SACK timer if it is not running. */
2471 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2472 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2473 stcb->sctp_ep, stcb, NULL);
/*
 * Service the reassembly queue: continue an in-progress partial-delivery
 * (PD-API) session, or decide whether a new fragmented message at the head
 * of the reassembly queue can start delivery.
 *
 * stcb - association control block.
 * asoc - the association (reassembly queue, stream state, PD-API state).
 *
 * NOTE(review): this excerpt is missing intermediate lines; comments below
 * describe only what the visible statements establish.
 */
2480 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2482 struct sctp_tmit_chunk *chk;
2483 uint32_t tsize, pd_point;
	/* Already mid partial-delivery? Keep feeding the reassembly engine. */
2486 if (asoc->fragmented_delivery_inprogress) {
2487 sctp_service_reassembly(stcb, asoc);
2489 /* Can we proceed further, i.e. the PD-API is complete */
2490 if (asoc->fragmented_delivery_inprogress) {
2495 * Now is there some other chunk I can deliver from the reassembly
2499 chk = TAILQ_FIRST(&asoc->reasmqueue);
	/* Empty reassembly queue: reset its size/count accounting. */
2501 asoc->size_on_reasm_queue = 0;
2502 asoc->cnt_on_reasm_queue = 0;
	/* Next in-order stream sequence number expected for chk's stream. */
2505 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
	/*
	 * Only a FIRST fragment that is either the next in-order SSN or
	 * unordered may start a new delivery.
	 */
2506 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2507 ((nxt_todel == chk->rec.data.stream_seq) ||
2508 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2510 * Yep the first one is here. We setup to start reception,
2511 * by backing down the TSN just in case we can't deliver.
2515 * Before we start though either all of the message should
2516 * be here or the socket buffer max or nothing on the
2517 * delivery queue and something can be delivered.
	/*
	 * pd_point: threshold of queued bytes before partial delivery may
	 * begin; bounded by the socket receive buffer when one exists.
	 */
2519 if (stcb->sctp_socket) {
2520 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2521 stcb->sctp_ep->partial_delivery_point);
2523 pd_point = stcb->sctp_ep->partial_delivery_point;
	/* Start delivery if the whole message is queued or we hit pd_point. */
2525 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
	/* Record the PD-API session state from the head chunk. */
2526 asoc->fragmented_delivery_inprogress = 1;
2527 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2528 asoc->str_of_pdapi = chk->rec.data.stream_number;
2529 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2530 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2531 asoc->fragment_flags = chk->rec.data.rcv_flags;
2532 sctp_service_reassembly(stcb, asoc);
2533 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * Walk all DATA chunks in a received packet, hand each to
 * sctp_process_a_data_chunk(), then update liveness/auto-close state and
 * trigger SACK generation via sctp_sack_check().
 *
 * mm       - in/out pointer to the packet mbuf chain (may be replaced when
 *            the data is copied out of an oversized cluster).
 * iphlen   - length of the IP header preceding the SCTP common header.
 * offset   - in/out byte offset of the current chunk within the packet.
 * length   - total packet length.
 * sh       - the SCTP common header (used when aborting).
 * inp/stcb - endpoint and association control blocks.
 * net      - remote transport address this packet arrived from.
 * high_tsn - out: highest TSN seen while processing (seeded with cum-TSN).
 *
 * NOTE(review): this excerpt is missing intermediate lines; comments below
 * describe only what the visible statements establish.
 */
2541 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2542 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2543 struct sctp_nets *net, uint32_t * high_tsn)
2545 struct sctp_data_chunk *ch, chunk_buf;
2546 struct sctp_association *asoc;
2547 int num_chunks = 0;	/* number of control chunks processed */
2549 int chk_length, break_flag, last_chunk;
2550 int abort_flag = 0, was_a_gap;
2552 uint32_t highest_tsn;
	/* Refresh our advertised receive window before processing. */
2555 sctp_set_rwnd(stcb, &stcb->asoc);
2558 SCTP_TCB_LOCK_ASSERT(stcb);
	/* Highest TSN = max of renegable and non-renegable map trackers. */
2560 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2561 highest_tsn = asoc->highest_tsn_inside_nr_map;
2563 highest_tsn = asoc->highest_tsn_inside_map;
	/* Remember whether a gap already existed before this packet. */
2565 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2567 * setup where we got the last DATA packet from for any SACK that
2568 * may need to go out. Don't bump the net. This is done ONLY when a
2569 * chunk is assigned.
2571 asoc->last_data_chunk_from = net;
2574 * Now before we proceed we must figure out if this is a wasted
2575 * cluster... i.e. it is a small packet sent in and yet the driver
2576 * underneath allocated a full cluster for it. If so we must copy it
2577 * to a smaller mbuf and free up the cluster mbuf. This will help
2578 * with cluster starvation. Note for __Panda__ we don't do this
2579 * since it has clusters all the way down to 64 bytes.
2581 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2582 /* we only handle mbufs that are singletons.. not chains */
2583 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2585 /* ok lets see if we can copy the data up */
2588 /* get the pointers and copy */
2589 to = mtod(m, caddr_t *);
2590 from = mtod((*mm), caddr_t *);
2591 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2592 /* copy the length and free up the old */
2593 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2595 /* success, back copy */
2598 /* We are in trouble in the mbuf world .. yikes */
2602 /* get pointer to the first chunk header */
2603 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2604 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2609 * process all DATA chunks...
2611 *high_tsn = asoc->cumulative_tsn;
2613 asoc->data_pkts_seen++;
	/* Main chunk-walking loop over the rest of the packet. */
2614 while (stop_proc == 0) {
2615 /* validate chunk length */
2616 chk_length = ntohs(ch->ch.chunk_length);
2617 if (length - *offset < chk_length) {
2618 /* all done, mutilated chunk */
2622 if (ch->ch.chunk_type == SCTP_DATA) {
	/* A DATA chunk must carry at least one byte of user payload. */
2623 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2625 * Need to send an abort since we had an
2626 * invalid data chunk.
2628 struct mbuf *op_err;
	/* Build a PROTOCOL_VIOLATION error cause for the ABORT. */
2630 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2631 0, M_DONTWAIT, 1, MT_DATA);
2634 struct sctp_paramhdr *ph;
2637 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2638 (2 * sizeof(uint32_t));
2639 ph = mtod(op_err, struct sctp_paramhdr *);
2641 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2642 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2643 ippp = (uint32_t *) (ph + 1);
2644 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2646 *ippp = asoc->cumulative_tsn;
2649 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2650 sctp_abort_association(inp, stcb, m, iphlen, sh,
2651 op_err, 0, net->port);
2654 #ifdef SCTP_AUDITING_ENABLED
2655 sctp_audit_log(0xB1, 0);
	/* Is this the last chunk in the packet? */
2657 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2662 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2663 chk_length, net, high_tsn, &abort_flag, &break_flag,
2672 * Set because of out of rwnd space and no
2673 * drop rep space left.
2679 /* not a data chunk in the data region */
2680 switch (ch->ch.chunk_type) {
2681 case SCTP_INITIATION:
2682 case SCTP_INITIATION_ACK:
2683 case SCTP_SELECTIVE_ACK:
2684 case SCTP_NR_SELECTIVE_ACK:	/* EY */
2685 case SCTP_HEARTBEAT_REQUEST:
2686 case SCTP_HEARTBEAT_ACK:
2687 case SCTP_ABORT_ASSOCIATION:
2689 case SCTP_SHUTDOWN_ACK:
2690 case SCTP_OPERATION_ERROR:
2691 case SCTP_COOKIE_ECHO:
2692 case SCTP_COOKIE_ACK:
2695 case SCTP_SHUTDOWN_COMPLETE:
2696 case SCTP_AUTHENTICATION:
2697 case SCTP_ASCONF_ACK:
2698 case SCTP_PACKET_DROPPED:
2699 case SCTP_STREAM_RESET:
2700 case SCTP_FORWARD_CUM_TSN:
2703 * Now, what do we do with KNOWN chunks that
2704 * are NOT in the right place?
2706 * For now, I do nothing but ignore them. We
2707 * may later want to add sysctl stuff to
2708 * switch out and do either an ABORT() or
2709 * possibly process them.
2711 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2712 struct mbuf *op_err;
2714 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2715 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2720 /* unknown chunk type, use bit rules */
	/* High bit 0x40 set: report the unrecognized chunk to the peer. */
2721 if (ch->ch.chunk_type & 0x40) {
2722 /* Add an error report to the queue */
2724 struct sctp_paramhdr *phd;
2726 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2728 phd = mtod(merr, struct sctp_paramhdr *);
2730 * We cheat and use param
2731 * type since we did not
2732 * bother to define an error
2733 * cause struct. They are
2734 * the same basic format
2735 * with different names.
2738 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2740 htons(chk_length + sizeof(*phd));
2741 SCTP_BUF_LEN(merr) = sizeof(*phd);
2742 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2743 SCTP_SIZE32(chk_length),
2745 if (SCTP_BUF_NEXT(merr)) {
2746 sctp_queue_op_err(stcb, merr);
	/* High bit 0x80 clear: stop processing the rest of the packet. */
2752 if ((ch->ch.chunk_type & 0x80) == 0) {
2753 /* discard the rest of this packet */
2755 } /* else skip this bad chunk and
2758 };	/* switch of chunk type */
	/* Advance to the next 32-bit-aligned chunk boundary. */
2760 *offset += SCTP_SIZE32(chk_length);
2761 if ((*offset >= length) || stop_proc) {
2762 /* no more data left in the mbuf chain */
2766 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2767 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2777 * we need to report rwnd overrun drops.
2779 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2783 * Did we get data, if so update the time for auto-close and
2784 * give peer credit for being alive.
2786 SCTP_STAT_INCR(sctps_recvpktwithdata);
2787 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2788 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2789 stcb->asoc.overall_error_count,
2791 SCTP_FROM_SCTP_INDATA,
	/* Receiving valid DATA resets the association error count. */
2794 stcb->asoc.overall_error_count = 0;
2795 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2797 /* now service all of the reassm queue if needed */
2798 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2799 sctp_service_queues(stcb, asoc);
2801 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2802 /* Assure that we ack right away */
2803 stcb->asoc.send_sack = 1;
2805 /* Start a sack timer or QUEUE a SACK for sending */
2806 sctp_sack_check(stcb, was_a_gap, &abort_flag);
/*
 * Process one gap-ack block [frag_strt, frag_end] (offsets relative to
 * last_tsn) against the sent queue: mark newly-acked chunks, maintain the
 * CMT pseudo-cumack / SFR / DAC trackers, adjust flight size and rwnd
 * accounting, and (for nr-sacks) free acked data.
 *
 * p_tp1                    - in/out cursor into the sent queue, so successive
 *                            in-order blocks resume where the last one left off.
 * last_tsn                 - the SACK's cumulative TSN ack point.
 * frag_strt/frag_end       - gap block bounds, in TSN offsets from last_tsn.
 * nr_sacking               - nonzero when processing a non-renegable block.
 * biggest_newly_acked_tsn  - out: highest TSN newly acked by this SACK.
 * this_sack_lowest_newack  - out: lowest newly acked TSN (CMT DAC).
 * Returns wake_him, used only for nr-sack (see return comment).
 *
 * NOTE(review): this excerpt is missing intermediate lines; comments below
 * describe only what the visible statements establish.
 */
2814 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2815 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2817 uint32_t * biggest_newly_acked_tsn,
2818 uint32_t * this_sack_lowest_newack,
2821 struct sctp_tmit_chunk *tp1;
2822 unsigned int theTSN;
2823 int j, wake_him = 0, circled = 0;
2825 /* Recover the tp1 we last saw */
2828 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	/* Iterate over every TSN offset covered by this gap block. */
2830 for (j = frag_strt; j <= frag_end; j++) {
2831 theTSN = j + last_tsn;
2833 if (tp1->rec.data.doing_fast_retransmit)
2837 * CMT: CUCv2 algorithm. For each TSN being
2838 * processed from the sent queue, track the
2839 * next expected pseudo-cumack, or
2840 * rtx_pseudo_cumack, if required. Separate
2841 * cumack trackers for first transmissions,
2842 * and retransmissions.
	/* First transmission (snd_count == 1): track pseudo_cumack. */
2844 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2845 (tp1->snd_count == 1)) {
2846 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2847 tp1->whoTo->find_pseudo_cumack = 0;
	/* Retransmission (snd_count > 1): track rtx_pseudo_cumack. */
2849 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2850 (tp1->snd_count > 1)) {
2851 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2852 tp1->whoTo->find_rtx_pseudo_cumack = 0;
	/* Found the sent-queue entry for this gap-acked TSN. */
2854 if (tp1->rec.data.TSN_seq == theTSN) {
2855 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2857 * must be held until
2861 * ECN Nonce: Add the nonce
2862 * value to the sender's
2865 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2867 * If it is less than RESEND, it is
2868 * now no-longer in flight.
2869 * Higher values may already be set
2870 * via previous Gap Ack Blocks...
2871 * i.e. ACKED or RESEND.
2873 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2874 *biggest_newly_acked_tsn)) {
2875 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2878 * CMT: SFR algo (and HTNA) - set
2879 * saw_newack to 1 for dest being
2880 * newly acked. update
2881 * this_sack_highest_newack if
2884 if (tp1->rec.data.chunk_was_revoked == 0)
2885 tp1->whoTo->saw_newack = 1;
2887 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2888 tp1->whoTo->this_sack_highest_newack)) {
2889 tp1->whoTo->this_sack_highest_newack =
2890 tp1->rec.data.TSN_seq;
2893 * CMT DAC algo: also update
2894 * this_sack_lowest_newack
2896 if (*this_sack_lowest_newack == 0) {
2897 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2898 sctp_log_sack(*this_sack_lowest_newack,
2900 tp1->rec.data.TSN_seq,
2903 SCTP_LOG_TSN_ACKED);
2905 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2908 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2909 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2910 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2911 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2912 * Separate pseudo_cumack trackers for first transmissions and
2915 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2916 if (tp1->rec.data.chunk_was_revoked == 0) {
2917 tp1->whoTo->new_pseudo_cumack = 1;
2919 tp1->whoTo->find_pseudo_cumack = 1;
2921 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2922 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2924 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2925 if (tp1->rec.data.chunk_was_revoked == 0) {
2926 tp1->whoTo->new_pseudo_cumack = 1;
2928 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2930 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2931 sctp_log_sack(*biggest_newly_acked_tsn,
2933 tp1->rec.data.TSN_seq,
2936 SCTP_LOG_TSN_ACKED);
2938 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2939 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2940 tp1->whoTo->flight_size,
2942 (uintptr_t) tp1->whoTo,
2943 tp1->rec.data.TSN_seq);
	/* Newly acked: remove the chunk from per-net and total flight. */
2945 sctp_flight_size_decrease(tp1);
2946 sctp_total_flight_decrease(stcb, tp1);
2948 tp1->whoTo->net_ack += tp1->send_size;
2950 if (tp1->snd_count < 2) {
2951 * True non-retransmitted chunk
2953 tp1->whoTo->net_ack2 += tp1->send_size;
	/* First-transmission ack: usable for an RTT measurement. */
2960 sctp_calculate_rto(stcb,
2963 &tp1->sent_rcv_time,
2964 sctp_align_safe_nocopy);
2969 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2970 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2971 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2972 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2973 stcb->asoc.this_sack_highest_gap)) {
2974 stcb->asoc.this_sack_highest_gap =
2975 tp1->rec.data.TSN_seq;
	/* A gap-acked RESEND no longer needs retransmission. */
2977 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2978 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2979 #ifdef SCTP_AUDITING_ENABLED
2980 sctp_audit_log(0xB2,
2981 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2986 * All chunks NOT UNSENT fall through here and are marked
2987 * (leave PR-SCTP ones that are to skip alone though)
2989 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2990 tp1->sent = SCTP_DATAGRAM_MARKED;
2992 if (tp1->rec.data.chunk_was_revoked) {
2993 /* deflate the cwnd */
2994 tp1->whoTo->cwnd -= tp1->book_size;
2995 tp1->rec.data.chunk_was_revoked = 0;
2997 /* NR Sack code here */
	/* Non-renegable ack: release the chunk's buffer space and data. */
3004 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3005 sctp_m_freem(tp1->data);
3012 }	/* if (tp1->TSN_seq == theTSN) */
	/* Passed the target TSN without a match: stop scanning forward. */
3013 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3016 tp1 = TAILQ_NEXT(tp1, sctp_next);
	/* Reached queue end once: wrap to the head and keep looking. */
3017 if ((tp1 == NULL) && (circled == 0)) {
3019 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3021 }	/* end while (tp1) */
3024 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3026 /* In case the fragments were not in order we must reset */
3027 }	/* end for (j = fragStart */
3029 return (wake_him);	/* Return value only used for nr-sack */
/*
 * Parse every gap-ack block (renegable then non-renegable) out of a SACK /
 * NR-SACK chunk and feed each to sctp_process_segment_range().
 *
 * m/offset                 - the chunk mbuf and in/out parse offset.
 * last_tsn                 - the SACK's cumulative TSN ack.
 * biggest_tsn_acked        - out: highest TSN covered by any block.
 * biggest_newly_acked_tsn  - out: highest newly acked TSN.
 * this_sack_lowest_newack  - out: lowest newly acked TSN (CMT DAC).
 * num_seg / num_nr_seg     - counts of gap blocks and nr-gap blocks.
 * Returns chunk_freed (nonzero if nr-sack processing freed chunk data).
 *
 * NOTE(review): this excerpt is missing intermediate lines; comments below
 * describe only what the visible statements establish.
 */
3034 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3035 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3036 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3037 int num_seg, int num_nr_seg, int *ecn_seg_sums)
3039 struct sctp_gap_ack_block *frag, block;
3040 struct sctp_tmit_chunk *tp1;
3045 uint16_t frag_strt, frag_end, prev_frag_end;
3047 tp1 = TAILQ_FIRST(&asoc->sent_queue);
	/* One iteration per gap block; the first num_seg are renegable. */
3051 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3054 tp1 = TAILQ_FIRST(&asoc->sent_queue);
	/* Pull the next gap block out of the chunk (copied into 'block'). */
3056 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3057 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3058 *offset += sizeof(block);
3060 return (chunk_freed);
3062 frag_strt = ntohs(frag->start);
3063 frag_end = ntohs(frag->end);
3065 if (frag_strt > frag_end) {
3066 /* This gap report is malformed, skip it. */
3069 if (frag_strt <= prev_frag_end) {
3070 /* This gap report is not in order, so restart. */
3071 tp1 = TAILQ_FIRST(&asoc->sent_queue);
	/* Track the highest TSN any block claims to ack. */
3073 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3074 *biggest_tsn_acked = last_tsn + frag_end;
3081 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3082 non_revocable, &num_frs, biggest_newly_acked_tsn,
3083 this_sack_lowest_newack, ecn_seg_sums)) {
3086 prev_frag_end = frag_end;
3088 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3090 sctp_log_fr(*biggest_tsn_acked,
3091 *biggest_newly_acked_tsn,
3092 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3094 return (chunk_freed);
/*
 * Scan the sent queue for chunks whose earlier gap-ack was revoked by this
 * SACK: a chunk above the cum-ack that is still marked ACKED (but was not
 * re-acked now) goes back to SENT and its flight/cwnd accounting is
 * restored; a chunk marked MARKED (re-acked this time) is promoted to ACKED.
 *
 * cumack            - this SACK's cumulative TSN ack.
 * biggest_tsn_acked - highest TSN covered by this SACK's gap blocks;
 *                     chunks above it cannot have been revoked by it.
 *
 * NOTE(review): this excerpt is missing intermediate lines; comments below
 * describe only what the visible statements establish.
 */
3098 sctp_check_for_revoked(struct sctp_tcb *stcb,
3099 struct sctp_association *asoc, uint32_t cumack,
3100 uint32_t biggest_tsn_acked)
3102 struct sctp_tmit_chunk *tp1;
3103 int tot_revoked = 0;
3105 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
	/* Only chunks above the cum-ack can be gap-acked / revoked. */
3106 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3108 * ok this guy is either ACK or MARKED. If it is
3109 * ACKED it has been previously acked but not this
3110 * time i.e. revoked. If it is MARKED it was ACK'ed
	/* Beyond the SACK's reach: nothing further to check. */
3113 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3116 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3117 /* it has been revoked */
3118 tp1->sent = SCTP_DATAGRAM_SENT;
3119 tp1->rec.data.chunk_was_revoked = 1;
3121 * We must add this stuff back in to assure
3122 * timers and such get started.
3124 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3125 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3126 tp1->whoTo->flight_size,
3128 (uintptr_t) tp1->whoTo,
3129 tp1->rec.data.TSN_seq);
	/* Revoked chunk is in flight again. */
3131 sctp_flight_size_increase(tp1);
3132 sctp_total_flight_increase(stcb, tp1);
3134 * We inflate the cwnd to compensate for our
3135 * artificial inflation of the flight_size.
3137 tp1->whoTo->cwnd += tp1->book_size;
3139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3140 sctp_log_sack(asoc->last_acked_seq,
3142 tp1->rec.data.TSN_seq,
3145 SCTP_LOG_TSN_REVOKED);
3147 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3148 /* it has been re-acked in this SACK */
3149 tp1->sent = SCTP_DATAGRAM_ACKED;
3152 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3155 if (tot_revoked > 0) {
3157 * Setup the ecn nonce re-sync point. We do this since once
3158 * data is revoked we begin to retransmit things, which do
3159 * NOT have the ECN bits set. This means we are now out of
3160 * sync and must wait until we get back in sync with the
3161 * peer to check ECN bits.
	/* Re-sync at the next TSN to be sent (or queued to send). */
3163 tp1 = TAILQ_FIRST(&asoc->send_queue);
3165 asoc->nonce_resync_tsn = asoc->sending_seq;
3167 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3169 asoc->nonce_wait_for_ecne = 0;
3170 asoc->nonce_sum_check = 0;
3176 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3177 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3179 struct sctp_tmit_chunk *tp1;
3180 int strike_flag = 0;
3182 int tot_retrans = 0;
3183 uint32_t sending_seq;
3184 struct sctp_nets *net;
3185 int num_dests_sacked = 0;
3188 * select the sending_seq, this is either the next thing ready to be
3189 * sent but not transmitted, OR, the next seq we assign.
3191 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3193 sending_seq = asoc->sending_seq;
3195 sending_seq = tp1->rec.data.TSN_seq;
3198 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3199 if ((asoc->sctp_cmt_on_off > 0) &&
3200 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3201 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3202 if (net->saw_newack)
3206 if (stcb->asoc.peer_supports_prsctp) {
3207 (void)SCTP_GETTIME_TIMEVAL(&now);
3209 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3211 if (tp1->no_fr_allowed) {
3212 /* this one had a timeout or something */
3215 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3216 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3217 sctp_log_fr(biggest_tsn_newly_acked,
3218 tp1->rec.data.TSN_seq,
3220 SCTP_FR_LOG_CHECK_STRIKE);
3222 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3223 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3227 if (stcb->asoc.peer_supports_prsctp) {
3228 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3229 /* Is it expired? */
3230 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3231 /* Yes so drop it */
3232 if (tp1->data != NULL) {
3233 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3234 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3235 SCTP_SO_NOT_LOCKED);
3241 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3242 /* we are beyond the tsn in the sack */
3245 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3246 /* either a RESEND, ACKED, or MARKED */
3248 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3249 /* Continue strikin FWD-TSN chunks */
3250 tp1->rec.data.fwd_tsn_cnt++;
3255 * CMT : SFR algo (covers part of DAC and HTNA as well)
3257 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3259 * No new acks were receieved for data sent to this
3260 * dest. Therefore, according to the SFR algo for
3261 * CMT, no data sent to this dest can be marked for
3262 * FR using this SACK.
3265 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3266 tp1->whoTo->this_sack_highest_newack)) {
3268 * CMT: New acks were receieved for data sent to
3269 * this dest. But no new acks were seen for data
3270 * sent after tp1. Therefore, according to the SFR
3271 * algo for CMT, tp1 cannot be marked for FR using
3272 * this SACK. This step covers part of the DAC algo
3273 * and the HTNA algo as well.
3278 * Here we check to see if we were have already done a FR
3279 * and if so we see if the biggest TSN we saw in the sack is
3280 * smaller than the recovery point. If so we don't strike
3281 * the tsn... otherwise we CAN strike the TSN.
3284 * @@@ JRI: Check for CMT if (accum_moved &&
3285 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3288 if (accum_moved && asoc->fast_retran_loss_recovery) {
3290 * Strike the TSN if in fast-recovery and cum-ack
3293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3294 sctp_log_fr(biggest_tsn_newly_acked,
3295 tp1->rec.data.TSN_seq,
3297 SCTP_FR_LOG_STRIKE_CHUNK);
3299 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3302 if ((asoc->sctp_cmt_on_off > 0) &&
3303 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3305 * CMT DAC algorithm: If SACK flag is set to
3306 * 0, then lowest_newack test will not pass
3307 * because it would have been set to the
3308 * cumack earlier. If not already to be
3309 * rtx'd, If not a mixed sack and if tp1 is
3310 * not between two sacked TSNs, then mark by
3311 * one more. NOTE that we are marking by one
3312 * additional time since the SACK DAC flag
3313 * indicates that two packets have been
3314 * received after this missing TSN.
3316 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3317 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3318 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3319 sctp_log_fr(16 + num_dests_sacked,
3320 tp1->rec.data.TSN_seq,
3322 SCTP_FR_LOG_STRIKE_CHUNK);
3327 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3328 (asoc->sctp_cmt_on_off == 0)) {
3330 * For those that have done a FR we must take
3331 * special consideration if we strike. I.e the
3332 * biggest_newly_acked must be higher than the
3333 * sending_seq at the time we did the FR.
3336 #ifdef SCTP_FR_TO_ALTERNATE
3338 * If FR's go to new networks, then we must only do
3339 * this for singly homed asoc's. However if the FR's
3340 * go to the same network (Armando's work) then its
3341 * ok to FR multiple times.
3349 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3350 tp1->rec.data.fast_retran_tsn)) {
3352 * Strike the TSN, since this ack is
3353 * beyond where things were when we
3356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3357 sctp_log_fr(biggest_tsn_newly_acked,
3358 tp1->rec.data.TSN_seq,
3360 SCTP_FR_LOG_STRIKE_CHUNK);
3362 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3366 if ((asoc->sctp_cmt_on_off > 0) &&
3367 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3369 * CMT DAC algorithm: If
3370 * SACK flag is set to 0,
3371 * then lowest_newack test
3372 * will not pass because it
3373 * would have been set to
3374 * the cumack earlier. If
3375 * not already to be rtx'd,
3376 * If not a mixed sack and
3377 * if tp1 is not between two
3378 * sacked TSNs, then mark by
3379 * one more. NOTE that we
3380 * are marking by one
3381 * additional time since the
3382 * SACK DAC flag indicates
3383 * that two packets have
3384 * been received after this
3387 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3388 (num_dests_sacked == 1) &&
3389 SCTP_TSN_GT(this_sack_lowest_newack,
3390 tp1->rec.data.TSN_seq)) {
3391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3392 sctp_log_fr(32 + num_dests_sacked,
3393 tp1->rec.data.TSN_seq,
3395 SCTP_FR_LOG_STRIKE_CHUNK);
3397 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3405 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3408 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3409 biggest_tsn_newly_acked)) {
3411 * We don't strike these: This is the HTNA
3412 * algorithm i.e. we don't strike If our TSN is
3413 * larger than the Highest TSN Newly Acked.
3417 /* Strike the TSN */
3418 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3419 sctp_log_fr(biggest_tsn_newly_acked,
3420 tp1->rec.data.TSN_seq,
3422 SCTP_FR_LOG_STRIKE_CHUNK);
3424 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3427 if ((asoc->sctp_cmt_on_off > 0) &&
3428 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3430 * CMT DAC algorithm: If SACK flag is set to
3431 * 0, then lowest_newack test will not pass
3432 * because it would have been set to the
3433 * cumack earlier. If not already to be
3434 * rtx'd, If not a mixed sack and if tp1 is
3435 * not between two sacked TSNs, then mark by
3436 * one more. NOTE that we are marking by one
3437 * additional time since the SACK DAC flag
3438 * indicates that two packets have been
3439 * received after this missing TSN.
3441 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3442 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3444 sctp_log_fr(48 + num_dests_sacked,
3445 tp1->rec.data.TSN_seq,
3447 SCTP_FR_LOG_STRIKE_CHUNK);
3453 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3454 struct sctp_nets *alt;
3456 /* fix counts and things */
3457 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3458 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3459 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3461 (uintptr_t) tp1->whoTo,
3462 tp1->rec.data.TSN_seq);
3465 tp1->whoTo->net_ack++;
3466 sctp_flight_size_decrease(tp1);
3468 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3469 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3470 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3472 /* add back to the rwnd */
3473 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3475 /* remove from the total flight */
3476 sctp_total_flight_decrease(stcb, tp1);
3478 if ((stcb->asoc.peer_supports_prsctp) &&
3479 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3481 * Has it been retransmitted tv_sec times? -
3482 * we store the retran count there.
3484 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3485 /* Yes, so drop it */
3486 if (tp1->data != NULL) {
3487 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3488 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3489 SCTP_SO_NOT_LOCKED);
3491 /* Make sure to flag we had a FR */
3492 tp1->whoTo->net_ack++;
3496 /* printf("OK, we are now ready to FR this guy\n"); */
3497 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3498 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3502 /* This is a subsequent FR */
3503 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3505 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3506 if (asoc->sctp_cmt_on_off > 0) {
3508 * CMT: Using RTX_SSTHRESH policy for CMT.
3509 * If CMT is being used, then pick dest with
3510 * largest ssthresh for any retransmission.
3512 tp1->no_fr_allowed = 1;
3514 /* sa_ignore NO_NULL_CHK */
3515 if (asoc->sctp_cmt_pf > 0) {
3517 * JRS 5/18/07 - If CMT PF is on,
3518 * use the PF version of
3521 alt = sctp_find_alternate_net(stcb, alt, 2);
3524 * JRS 5/18/07 - If only CMT is on,
3525 * use the CMT version of
3528 /* sa_ignore NO_NULL_CHK */
3529 alt = sctp_find_alternate_net(stcb, alt, 1);
3535 * CUCv2: If a different dest is picked for
3536 * the retransmission, then new
3537 * (rtx-)pseudo_cumack needs to be tracked
3538 * for orig dest. Let CUCv2 track new (rtx-)
3539 * pseudo-cumack always.
3542 tp1->whoTo->find_pseudo_cumack = 1;
3543 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3545 } else {/* CMT is OFF */
3547 #ifdef SCTP_FR_TO_ALTERNATE
3548 /* Can we find an alternate? */
3549 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3552 * default behavior is to NOT retransmit
3553 * FR's to an alternate. Armando Caro's
3554 * paper details why.
3560 tp1->rec.data.doing_fast_retransmit = 1;
3562 /* mark the sending seq for possible subsequent FR's */
3564 * printf("Marking TSN for FR new value %x\n",
3565 * (uint32_t)tpi->rec.data.TSN_seq);
3567 if (TAILQ_EMPTY(&asoc->send_queue)) {
3569 * If the queue of send is empty then its
3570 * the next sequence number that will be
3571 * assigned so we subtract one from this to
3572 * get the one we last sent.
3574 tp1->rec.data.fast_retran_tsn = sending_seq;
3577 * If there are chunks on the send queue
3578 * (unsent data that has made it from the
3579 * stream queues but not out the door, we
3580 * take the first one (which will have the
3581 * lowest TSN) and subtract one to get the
3584 struct sctp_tmit_chunk *ttt;
3586 ttt = TAILQ_FIRST(&asoc->send_queue);
3587 tp1->rec.data.fast_retran_tsn =
3588 ttt->rec.data.TSN_seq;
3593 * this guy had a RTO calculation pending on
3598 if (alt != tp1->whoTo) {
3599 /* yes, there is an alternate. */
3600 sctp_free_remote_addr(tp1->whoTo);
3601 /* sa_ignore FREED_MEMORY */
3603 atomic_add_int(&alt->ref_count, 1);
3608 if (tot_retrans > 0) {
3610 * Setup the ecn nonce re-sync point. We do this since once
3611 * we go to FR something we introduce a Karn's rule scenario
3612 * and won't know the totals for the ECN bits.
3614 asoc->nonce_resync_tsn = sending_seq;
3615 asoc->nonce_wait_for_ecne = 0;
3616 asoc->nonce_sum_check = 0;
/*
 * Try to advance the PR-SCTP "advanced peer ack point" (RFC 3758, C2)
 * by walking the sent_queue past chunks marked SCTP_FORWARD_TSN_SKIP,
 * expiring TTL-policy RESEND chunks along the way.  Returns the chunk
 * saved in a_adv (the one sitting at the new ack point) so the caller
 * can build a FORWARD-TSN; a_adv stays NULL if no advance was possible.
 *
 * NOTE(review): this is a lossy numbered listing — gaps in the embedded
 * line numbers (e.g. 3624->3628, 3693->3698) mean closing braces,
 * `break`/`return` statements and comment delimiters are elided from
 * view; the annotations below describe only what is visible.
 */
3620 struct sctp_tmit_chunk *
3621 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3622 struct sctp_association *asoc)
3624 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3628 if (asoc->peer_supports_prsctp == 0) {
/* Safe walk: sctp_release_pr_sctp_chunk() below may unlink tp1. */
3631 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3632 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3633 tp1->sent != SCTP_DATAGRAM_RESEND) {
3634 /* no chance to advance, out of here */
3637 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3638 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3639 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3640 asoc->advanced_peer_ack_point,
3641 tp1->rec.data.TSN_seq, 0, 0);
3644 if (!PR_SCTP_ENABLED(tp1->flags)) {
3646 * We can't fwd-tsn past any that are reliable aka
3647 * retransmitted until the asoc fails.
/* Snapshot "now" once for the TTL comparisons below. */
3652 (void)SCTP_GETTIME_TIMEVAL(&now);
3656 * now we got a chunk which is marked for another
3657 * retransmission to a PR-stream but has run out its chances
3658 * already maybe OR has been marked to skip now. Can we skip
3659 * it if its a resend?
3661 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3662 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3664 * Now is this one marked for resend and its time is
3667 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3668 /* Yes so drop it */
/* Expired TTL chunk: abandon it and notify the app. */
3670 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3671 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3672 SCTP_SO_NOT_LOCKED);
3676 * No, we are done when hit one for resend
3677 * whos time as not expired.
3683 * Ok now if this chunk is marked to drop it we can clean up
3684 * the chunk, advance our peer ack point and we can check
3687 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3688 /* advance PeerAckPoint goes forward */
3689 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3690 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3692 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3693 /* No update but we do save the chk */
3698 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping: re-count the
 * sent_queue by chunk state (in-flight / RESEND / in-between / ACKED /
 * above) and compare against the cached asoc->total_flight totals
 * snapshotted in entry_flight/entry_cnt.  Any chunk still counted as
 * in-flight or in-between when the caller believed flight was drained
 * is reported (panic under invariants, SCTP_PRINTF otherwise).
 *
 * NOTE(review): lossy listing — the return-type line (static int per
 * upstream convention), loop-body counter increments, and the final
 * return are elided; only the visible skeleton is annotated here.
 */
3708 sctp_fs_audit(struct sctp_association *asoc)
3710 struct sctp_tmit_chunk *chk;
/* Per-state tallies rebuilt from the queue walk below. */
3711 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3712 int entry_flight, entry_cnt, ret;
/* Snapshot the cached totals so the report shows what we entered with. */
3714 entry_flight = asoc->total_flight;
3715 entry_cnt = asoc->total_flight_count;
/* All-PR-SCTP queues are exempt from this audit. */
3718 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3721 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3722 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3723 printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3724 chk->rec.data.TSN_seq,
3729 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3731 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3733 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3740 if ((inflight > 0) || (inbetween > 0)) {
3742 panic("Flight size-express incorrect? \n");
3744 printf("asoc->total_flight:%d cnt:%d\n",
3745 entry_flight, entry_cnt);
3747 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3748 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag and, unless it was already ACKED (or its data is
 * gone), pull it out of flight accounting and mark it for RESEND so it
 * is queued again now that the peer's window has reopened.
 *
 * NOTE(review): lossy listing — the return-type line and several
 * closing braces / flight-log argument lines are elided from view.
 */
3757 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3758 struct sctp_association *asoc,
3759 struct sctp_nets *net,
3760 struct sctp_tmit_chunk *tp1)
3762 tp1->window_probe = 0;
3763 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3764 /* TSN's skipped we do NOT move back. */
/* Log the forward skip; the chunk stays where it is. */
3765 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3766 tp1->whoTo->flight_size,
3768 (uintptr_t) tp1->whoTo,
3769 tp1->rec.data.TSN_seq);
3772 /* First setup this by shrinking flight */
3773 sctp_flight_size_decrease(tp1);
3774 sctp_total_flight_decrease(stcb, tp1);
3775 /* Now mark for resend */
3776 tp1->sent = SCTP_DATAGRAM_RESEND;
3777 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3779 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3780 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3781 tp1->whoTo->flight_size,
3783 (uintptr_t) tp1->whoTo,
3784 tp1->rec.data.TSN_seq);
/*
 * Fast-path SACK handler: processes a SACK that carries no gap-ack
 * blocks and no duplicate TSNs (only a new cumulative ack + rwnd).
 * It releases every chunk on sent_queue at or below `cumack`, updates
 * flight/rwnd/CC state, ECN-nonce tracking, window-probe recovery,
 * T3 timers, shutdown progression, and PR-SCTP FORWARD-TSN advance.
 * Sets *abort_now when the peer's SACK forces an association abort.
 *
 * NOTE(review): lossy numbered listing — gaps in the embedded line
 * numbers mean many closing braces, else-arms, local declarations
 * (old_rwnd, send_s, oper, so, to_ticks, ...) and early `return`s are
 * elided.  Comments below annotate only the visible code.
 */
3789 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3790 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3792 struct sctp_nets *net;
3793 struct sctp_association *asoc;
3794 struct sctp_tmit_chunk *tp1, *tp2;
3796 int win_probe_recovery = 0;
3797 int win_probe_recovered = 0;
3798 int j, done_once = 0;
3800 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3801 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3802 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
/* Caller must already hold the TCB lock. */
3804 SCTP_TCB_LOCK_ASSERT(stcb);
3805 #ifdef SCTP_ASOCLOG_OF_TSNS
3806 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3807 stcb->asoc.cumack_log_at++;
3808 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3809 stcb->asoc.cumack_log_at = 0;
3813 old_rwnd = asoc->peers_rwnd;
/* Stale SACK (cumack behind what we already acked): visible branch
 * ends here; the bail-out body is elided. */
3814 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3817 } else if (asoc->last_acked_seq == cumack) {
3818 /* Window update sack */
3819 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3820 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3821 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3822 /* SWS sender side engages */
3823 asoc->peers_rwnd = 0;
3825 if (asoc->peers_rwnd > old_rwnd) {
3830 /* First setup for CC stuff */
3831 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3832 net->prev_cwnd = net->cwnd;
3837 * CMT: Reset CUC and Fast recovery algo variables before
3840 net->new_pseudo_cumack = 0;
3841 net->will_exit_fast_recovery = 0;
/* Strict-sack sanity: a cumack at/above our next sending TSN is a
 * protocol violation and aborts the association below. */
3843 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3846 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3847 tp1 = TAILQ_LAST(&asoc->sent_queue,
3848 sctpchunk_listhead);
3849 send_s = tp1->rec.data.TSN_seq + 1;
3851 send_s = asoc->sending_seq;
3853 if (SCTP_TSN_GE(cumack, send_s)) {
3859 panic("Impossible sack 1");
/* Build a PROTOCOL_VIOLATION error cause carrying our location code. */
3864 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3865 0, M_DONTWAIT, 1, MT_DATA);
3867 struct sctp_paramhdr *ph;
3870 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3872 ph = mtod(oper, struct sctp_paramhdr *);
3873 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3874 ph->param_length = htons(SCTP_BUF_LEN(oper));
3875 ippp = (uint32_t *) (ph + 1);
3876 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3878 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3879 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3884 asoc->this_sack_highest_gap = cumack;
3885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3886 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3887 stcb->asoc.overall_error_count,
3889 SCTP_FROM_SCTP_INDATA,
/* Any SACK arrival clears the association error counter. */
3892 stcb->asoc.overall_error_count = 0;
3893 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3894 /* process the new consecutive TSN first */
3895 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3896 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3897 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3898 printf("Warning, an unsent is now acked?\n");
3901 * ECN Nonce: Add the nonce to the sender's
3904 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3905 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3907 * If it is less than ACKED, it is
3908 * now no-longer in flight. Higher
3909 * values may occur during marking
3911 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3912 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3913 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3914 tp1->whoTo->flight_size,
3916 (uintptr_t) tp1->whoTo,
3917 tp1->rec.data.TSN_seq);
3919 sctp_flight_size_decrease(tp1);
3920 /* sa_ignore NO_NULL_CHK */
3921 sctp_total_flight_decrease(stcb, tp1);
3923 tp1->whoTo->net_ack += tp1->send_size;
3924 if (tp1->snd_count < 2) {
3926 * True non-retransmited
3929 tp1->whoTo->net_ack2 +=
3932 /* update RTO too? */
/* First-transmission chunks feed the RTO estimator (Karn's rule:
 * retransmitted chunks never do). */
3939 sctp_calculate_rto(stcb,
3941 &tp1->sent_rcv_time,
3942 sctp_align_safe_nocopy);
3947 * CMT: CUCv2 algorithm. From the
3948 * cumack'd TSNs, for each TSN being
3949 * acked for the first time, set the
3950 * following variables for the
3951 * corresp destination.
3952 * new_pseudo_cumack will trigger a
3954 * find_(rtx_)pseudo_cumack will
3955 * trigger search for the next
3956 * expected (rtx-)pseudo-cumack.
3958 tp1->whoTo->new_pseudo_cumack = 1;
3959 tp1->whoTo->find_pseudo_cumack = 1;
3960 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3962 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3963 /* sa_ignore NO_NULL_CHK */
3964 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3967 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3968 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3970 if (tp1->rec.data.chunk_was_revoked) {
3971 /* deflate the cwnd */
3972 tp1->whoTo->cwnd -= tp1->book_size;
3973 tp1->rec.data.chunk_was_revoked = 0;
/* Chunk fully acked: unlink, release buffer space and free it. */
3975 tp1->sent = SCTP_DATAGRAM_ACKED;
3976 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3978 /* sa_ignore NO_NULL_CHK */
3979 sctp_free_bufspace(stcb, asoc, tp1, 1);
3980 sctp_m_freem(tp1->data);
3983 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3984 sctp_log_sack(asoc->last_acked_seq,
3986 tp1->rec.data.TSN_seq,
3989 SCTP_LOG_FREE_SENT);
3991 asoc->sent_queue_cnt--;
3992 sctp_free_a_chunk(stcb, tp1);
3999 /* sa_ignore NO_NULL_CHK */
/* Wake any writer blocked on so_snd now that space was freed.
 * On Apple/lock-testing builds the socket lock must be taken with
 * the TCB lock dropped, guarded by a refcount hold. */
4000 if (stcb->sctp_socket) {
4001 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4005 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4007 /* sa_ignore NO_NULL_CHK */
4008 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4010 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4011 so = SCTP_INP_SO(stcb->sctp_ep);
4012 atomic_add_int(&stcb->asoc.refcnt, 1);
4013 SCTP_TCB_UNLOCK(stcb);
4014 SCTP_SOCKET_LOCK(so, 1);
4015 SCTP_TCB_LOCK(stcb);
4016 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4017 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4018 /* assoc was freed while we were unlocked */
4019 SCTP_SOCKET_UNLOCK(so, 1);
4023 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4024 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4025 SCTP_SOCKET_UNLOCK(so, 1);
4028 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4029 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4033 /* JRS - Use the congestion control given in the CC module */
4034 if (asoc->last_acked_seq != cumack)
4035 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4037 asoc->last_acked_seq = cumack;
4039 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4040 /* nothing left in-flight */
4041 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4042 net->flight_size = 0;
4043 net->partial_bytes_acked = 0;
4045 asoc->total_flight = 0;
4046 asoc->total_flight_count = 0;
4048 /* ECN Nonce updates */
4049 if (asoc->ecn_nonce_allowed) {
4050 if (asoc->nonce_sum_check) {
/* Nonce-sum mismatch: arm a wait up to the TSN where all in-flight
 * data will have drained before declaring the peer misbehaving. */
4051 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4052 if (asoc->nonce_wait_for_ecne == 0) {
4053 struct sctp_tmit_chunk *lchk;
4055 lchk = TAILQ_FIRST(&asoc->send_queue);
4056 asoc->nonce_wait_for_ecne = 1;
4058 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4060 asoc->nonce_wait_tsn = asoc->sending_seq;
4063 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->nonce_wait_tsn)) {
4065 * Misbehaving peer. We need
4066 * to react to this guy
4068 asoc->ecn_allowed = 0;
4069 asoc->ecn_nonce_allowed = 0;
4074 /* See if Resynchronization Possible */
4075 if (SCTP_TSN_GT(asoc->last_acked_seq, asoc->nonce_resync_tsn)) {
4076 asoc->nonce_sum_check = 1;
4078 * Now we must calculate what the base is.
4079 * We do this based on two things, we know
4080 * the total's for all the segments
4081 * gap-acked in the SACK (none). We also
4082 * know the SACK's nonce sum, its in
4083 * nonce_sum_flag. So we can build a truth
4084 * table to back-calculate the new value of
4085 * asoc->nonce_sum_expect_base:
4087 * SACK-flag-Value Seg-Sums Base 0 0 0
4091 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute peer rwnd from the advertised window minus outstanding
 * flight (plus per-chunk overhead); apply SWS avoidance. */
4096 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4097 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4098 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4099 /* SWS sender side engages */
4100 asoc->peers_rwnd = 0;
4102 if (asoc->peers_rwnd > old_rwnd) {
4103 win_probe_recovery = 1;
4105 /* Now assure a timer where data is queued at */
4108 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4111 if (win_probe_recovery && (net->window_probe)) {
4112 win_probe_recovered = 1;
4114 * Find first chunk that was used with window probe
4115 * and clear the sent
4117 /* sa_ignore FREED_MEMORY */
4118 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4119 if (tp1->window_probe) {
4120 /* move back to data send queue */
4121 sctp_window_probe_recovery(stcb, asoc, net, tp1);
/* Fall back to the initial RTO for nets with no RTT sample yet. */
4126 if (net->RTO == 0) {
4127 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4129 to_ticks = MSEC_TO_TICKS(net->RTO);
4131 if (net->flight_size) {
4133 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4134 sctp_timeout_handler, &net->rxt_timer);
4135 if (net->window_probe) {
4136 net->window_probe = 0;
4139 if (net->window_probe) {
4141 * In window probes we must assure a timer
4142 * is still running there
4144 net->window_probe = 0;
4145 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4146 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4147 sctp_timeout_handler, &net->rxt_timer);
4149 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4150 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4152 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4154 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4155 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4156 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4157 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4158 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight drained to zero but sent_queue non-empty with nothing marked
 * for retransmit: audit and, if wrong, rebuild flight counts. */
4164 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4165 (asoc->sent_queue_retran_cnt == 0) &&
4166 (win_probe_recovered == 0) &&
4169 * huh, this should not happen unless all packets are
4170 * PR-SCTP and marked to skip of course.
4172 if (sctp_fs_audit(asoc)) {
4173 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4174 net->flight_size = 0;
4176 asoc->total_flight = 0;
4177 asoc->total_flight_count = 0;
4178 asoc->sent_queue_retran_cnt = 0;
4179 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4180 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4181 sctp_flight_size_increase(tp1);
4182 sctp_total_flight_increase(stcb, tp1);
4183 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4184 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4191 /**********************************/
4192 /* Now what about shutdown issues */
4193 /**********************************/
4194 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4195 /* nothing left on sendqueue.. consider done */
4197 if ((asoc->stream_queue_cnt == 1) &&
4198 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4199 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4200 (asoc->locked_on_sending)
4202 struct sctp_stream_queue_pending *sp;
4205 * I may be in a state where we got all across.. but
4206 * cannot write more due to a shutdown... we abort
4207 * since the user did not indicate EOR in this case.
4208 * The sp will be cleaned during free of the asoc.
4210 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4212 if ((sp) && (sp->length == 0)) {
4213 /* Let cleanup code purge it */
4214 if (sp->msg_is_complete) {
4215 asoc->stream_queue_cnt--;
4217 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4218 asoc->locked_on_sending = NULL;
4219 asoc->stream_queue_cnt--;
4223 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4224 (asoc->stream_queue_cnt == 0)) {
4225 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4226 /* Need to abort here */
/* Build a USER_INITIATED_ABT cause for the partial-message abort. */
4232 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4233 0, M_DONTWAIT, 1, MT_DATA);
4235 struct sctp_paramhdr *ph;
4238 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4240 ph = mtod(oper, struct sctp_paramhdr *);
4241 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4242 ph->param_length = htons(SCTP_BUF_LEN(oper));
4243 ippp = (uint32_t *) (ph + 1);
4244 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4246 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4247 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4249 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4250 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4251 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* All data acked and app done: move to SHUTDOWN-SENT. */
4253 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4254 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4255 sctp_stop_timers_for_shutdown(stcb);
4256 sctp_send_shutdown(stcb,
4257 stcb->asoc.primary_destination);
4258 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4259 stcb->sctp_ep, stcb, asoc->primary_destination);
4260 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4261 stcb->sctp_ep, stcb, asoc->primary_destination);
4263 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4264 (asoc->stream_queue_cnt == 0)) {
4265 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4268 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* Peer shut down first: answer with SHUTDOWN-ACK. */
4269 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4270 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4271 sctp_send_shutdown_ack(stcb,
4272 stcb->asoc.primary_destination);
4273 sctp_stop_timers_for_shutdown(stcb);
4274 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4275 stcb->sctp_ep, stcb, asoc->primary_destination);
4278 /*********************************************/
4279 /* Here we perform PR-SCTP procedures */
4281 /*********************************************/
4282 /* C1. update advancedPeerAckPoint */
4283 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4284 asoc->advanced_peer_ack_point = cumack;
4286 /* PR-Sctp issues need to be addressed too */
4287 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4288 struct sctp_tmit_chunk *lchk;
4289 uint32_t old_adv_peer_ack_point;
4291 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4292 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4293 /* C3. See if we need to send a Fwd-TSN */
4294 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4296 * ISSUE with ECN, see FWD-TSN processing for notes
4297 * on issues that will occur when the ECN NONCE
4298 * stuff is put into SCTP for cross checking.
4300 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4301 send_forward_tsn(stcb, asoc);
4303 * ECN Nonce: Disable Nonce Sum check when
4304 * FWD TSN is sent and store resync tsn
4306 asoc->nonce_sum_check = 0;
4307 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4309 /* try to FR fwd-tsn's that get lost too */
4310 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4311 send_forward_tsn(stcb, asoc);
4316 /* Assure a timer is up */
4317 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4318 stcb->sctp_ep, stcb, lchk->whoTo);
4321 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4322 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4324 stcb->asoc.peers_rwnd,
4325 stcb->asoc.total_flight,
4326 stcb->asoc.total_output_queue_size);
4331 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4332 struct sctp_tcb *stcb, struct sctp_nets *net_from,
4333 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4334 int *abort_now, uint8_t flags,
4335 uint32_t cum_ack, uint32_t rwnd)
4337 struct sctp_association *asoc;
4338 struct sctp_tmit_chunk *tp1, *tp2;
4339 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4340 uint32_t sav_cum_ack;
4341 uint16_t wake_him = 0;
4342 uint32_t send_s = 0;
4344 int accum_moved = 0;
4345 int will_exit_fast_recovery = 0;
4346 uint32_t a_rwnd, old_rwnd;
4347 int win_probe_recovery = 0;
4348 int win_probe_recovered = 0;
4349 struct sctp_nets *net = NULL;
4350 int nonce_sum_flag, ecn_seg_sums = 0;
4352 uint8_t reneged_all = 0;
4353 uint8_t cmt_dac_flag;
4356 * we take any chance we can to service our queues since we cannot
4357 * get awoken when the socket is read from :<
4360 * Now perform the actual SACK handling: 1) Verify that it is not an
4361 * old sack, if so discard. 2) If there is nothing left in the send
4362 * queue (cum-ack is equal to last acked) then you have a duplicate
4363 * too, update any rwnd change and verify no timers are running.
4364 * then return. 3) Process any new consequtive data i.e. cum-ack
4365 * moved process these first and note that it moved. 4) Process any
4366 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4367 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4368 * sync up flightsizes and things, stop all timers and also check
4369 * for shutdown_pending state. If so then go ahead and send off the
4370 * shutdown. If in shutdown recv, send off the shutdown-ack and
4371 * start that timer, Ret. 9) Strike any non-acked things and do FR
4372 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4373 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4374 * if in shutdown_recv state.
4376 SCTP_TCB_LOCK_ASSERT(stcb);
4378 this_sack_lowest_newack = 0;
4380 SCTP_STAT_INCR(sctps_slowpath_sack);
4382 nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4383 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4384 #ifdef SCTP_ASOCLOG_OF_TSNS
4385 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4386 stcb->asoc.cumack_log_at++;
4387 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4388 stcb->asoc.cumack_log_at = 0;
4393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4394 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4395 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4397 old_rwnd = stcb->asoc.peers_rwnd;
4398 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4399 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4400 stcb->asoc.overall_error_count,
4402 SCTP_FROM_SCTP_INDATA,
4405 stcb->asoc.overall_error_count = 0;
4407 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4408 sctp_log_sack(asoc->last_acked_seq,
4415 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4417 uint32_t *dupdata, dblock;
4419 for (i = 0; i < num_dup; i++) {
4420 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4421 sizeof(uint32_t), (uint8_t *) & dblock);
4422 if (dupdata == NULL) {
4425 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4428 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4430 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4431 tp1 = TAILQ_LAST(&asoc->sent_queue,
4432 sctpchunk_listhead);
4433 send_s = tp1->rec.data.TSN_seq + 1;
4436 send_s = asoc->sending_seq;
4438 if (SCTP_TSN_GE(cum_ack, send_s)) {
4442 * no way, we have not even sent this TSN out yet.
4443 * Peer is hopelessly messed up with us.
4445 printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4448 printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4449 tp1->rec.data.TSN_seq, tp1);
4454 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4455 0, M_DONTWAIT, 1, MT_DATA);
4457 struct sctp_paramhdr *ph;
4460 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4462 ph = mtod(oper, struct sctp_paramhdr *);
4463 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4464 ph->param_length = htons(SCTP_BUF_LEN(oper));
4465 ippp = (uint32_t *) (ph + 1);
4466 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4468 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4469 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4473 /**********************/
4474 /* 1) check the range */
4475 /**********************/
4476 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4477 /* acking something behind */
4480 sav_cum_ack = asoc->last_acked_seq;
4482 /* update the Rwnd of the peer */
4483 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4484 TAILQ_EMPTY(&asoc->send_queue) &&
4485 (asoc->stream_queue_cnt == 0)) {
4486 /* nothing left on send/sent and strmq */
4487 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4488 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4489 asoc->peers_rwnd, 0, 0, a_rwnd);
4491 asoc->peers_rwnd = a_rwnd;
4492 if (asoc->sent_queue_retran_cnt) {
4493 asoc->sent_queue_retran_cnt = 0;
4495 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4496 /* SWS sender side engages */
4497 asoc->peers_rwnd = 0;
4499 /* stop any timers */
4500 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4501 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4502 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4503 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4504 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4505 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4506 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4507 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4510 net->partial_bytes_acked = 0;
4511 net->flight_size = 0;
4513 asoc->total_flight = 0;
4514 asoc->total_flight_count = 0;
4518 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4519 * things. The total byte count acked is tracked in netAckSz AND
4520 * netAck2 is used to track the total bytes acked that are un-
4521 * amibguious and were never retransmitted. We track these on a per
4522 * destination address basis.
4524 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4525 net->prev_cwnd = net->cwnd;
4530 * CMT: Reset CUC and Fast recovery algo variables before
4533 net->new_pseudo_cumack = 0;
4534 net->will_exit_fast_recovery = 0;
4536 /* process the new consecutive TSN first */
4537 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4538 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4539 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4541 * ECN Nonce: Add the nonce to the sender's
4544 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4546 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4548 * If it is less than ACKED, it is
4549 * now no-longer in flight. Higher
4550 * values may occur during marking
4552 if ((tp1->whoTo->dest_state &
4553 SCTP_ADDR_UNCONFIRMED) &&
4554 (tp1->snd_count < 2)) {
4556 * If there was no retran
4557 * and the address is
4558 * un-confirmed and we sent
4560 * sacked.. its confirmed,
4563 tp1->whoTo->dest_state &=
4564 ~SCTP_ADDR_UNCONFIRMED;
4566 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4567 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4568 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4569 tp1->whoTo->flight_size,
4571 (uintptr_t) tp1->whoTo,
4572 tp1->rec.data.TSN_seq);
4574 sctp_flight_size_decrease(tp1);
4575 sctp_total_flight_decrease(stcb, tp1);
4577 tp1->whoTo->net_ack += tp1->send_size;
4579 /* CMT SFR and DAC algos */
4580 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4581 tp1->whoTo->saw_newack = 1;
4583 if (tp1->snd_count < 2) {
4585 * True non-retransmited
4588 tp1->whoTo->net_ack2 +=
4591 /* update RTO too? */
4594 sctp_calculate_rto(stcb,
4596 &tp1->sent_rcv_time,
4597 sctp_align_safe_nocopy);
4602 * CMT: CUCv2 algorithm. From the
4603 * cumack'd TSNs, for each TSN being
4604 * acked for the first time, set the
4605 * following variables for the
4606 * corresp destination.
4607 * new_pseudo_cumack will trigger a
4609 * find_(rtx_)pseudo_cumack will
4610 * trigger search for the next
4611 * expected (rtx-)pseudo-cumack.
4613 tp1->whoTo->new_pseudo_cumack = 1;
4614 tp1->whoTo->find_pseudo_cumack = 1;
4615 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4618 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4619 sctp_log_sack(asoc->last_acked_seq,
4621 tp1->rec.data.TSN_seq,
4624 SCTP_LOG_TSN_ACKED);
4626 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4627 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4630 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4631 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4632 #ifdef SCTP_AUDITING_ENABLED
4633 sctp_audit_log(0xB3,
4634 (asoc->sent_queue_retran_cnt & 0x000000ff));
4637 if (tp1->rec.data.chunk_was_revoked) {
4638 /* deflate the cwnd */
4639 tp1->whoTo->cwnd -= tp1->book_size;
4640 tp1->rec.data.chunk_was_revoked = 0;
4642 tp1->sent = SCTP_DATAGRAM_ACKED;
4648 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4649 /* always set this up to cum-ack */
4650 asoc->this_sack_highest_gap = last_tsn;
4652 if ((num_seg > 0) || (num_nr_seg > 0)) {
4655 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4656 * to be greater than the cumack. Also reset saw_newack to 0
4659 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4660 net->saw_newack = 0;
4661 net->this_sack_highest_newack = last_tsn;
4665 * thisSackHighestGap will increase while handling NEW
4666 * segments this_sack_highest_newack will increase while
4667 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4668 * used for CMT DAC algo. saw_newack will also change.
4670 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4671 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4672 num_seg, num_nr_seg, &ecn_seg_sums)) {
4675 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4677 * validate the biggest_tsn_acked in the gap acks if
4678 * strict adherence is wanted.
4680 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4682 * peer is either confused or we are under
4683 * attack. We must abort.
4685 printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4693 /*******************************************/
4694 /* cancel ALL T3-send timer if accum moved */
4695 /*******************************************/
4696 if (asoc->sctp_cmt_on_off > 0) {
4697 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4698 if (net->new_pseudo_cumack)
4699 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4701 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4706 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4707 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4708 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4712 /********************************************/
4713 /* drop the acked chunks from the sentqueue */
4714 /********************************************/
4715 asoc->last_acked_seq = cum_ack;
4717 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4718 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4721 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4722 /* no more sent on list */
4723 printf("Warning, tp1->sent == %d and its now acked?\n",
4726 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4727 if (tp1->pr_sctp_on) {
4728 if (asoc->pr_sctp_cnt != 0)
4729 asoc->pr_sctp_cnt--;
4731 asoc->sent_queue_cnt--;
4733 /* sa_ignore NO_NULL_CHK */
4734 sctp_free_bufspace(stcb, asoc, tp1, 1);
4735 sctp_m_freem(tp1->data);
4737 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4738 asoc->sent_queue_cnt_removeable--;
4741 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4742 sctp_log_sack(asoc->last_acked_seq,
4744 tp1->rec.data.TSN_seq,
4747 SCTP_LOG_FREE_SENT);
4749 sctp_free_a_chunk(stcb, tp1);
4752 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4754 panic("Warning flight size is postive and should be 0");
4756 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4757 asoc->total_flight);
4759 asoc->total_flight = 0;
4761 /* sa_ignore NO_NULL_CHK */
4762 if ((wake_him) && (stcb->sctp_socket)) {
4763 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4767 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4769 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4771 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4772 so = SCTP_INP_SO(stcb->sctp_ep);
4773 atomic_add_int(&stcb->asoc.refcnt, 1);
4774 SCTP_TCB_UNLOCK(stcb);
4775 SCTP_SOCKET_LOCK(so, 1);
4776 SCTP_TCB_LOCK(stcb);
4777 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4778 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4779 /* assoc was freed while we were unlocked */
4780 SCTP_SOCKET_UNLOCK(so, 1);
4784 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4785 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4786 SCTP_SOCKET_UNLOCK(so, 1);
4789 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4790 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4794 if (asoc->fast_retran_loss_recovery && accum_moved) {
4795 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4796 /* Setup so we will exit RFC2582 fast recovery */
4797 will_exit_fast_recovery = 1;
4801 * Check for revoked fragments:
4803 * if Previous sack - Had no frags then we can't have any revoked if
4804 * Previous sack - Had frag's then - If we now have frags aka
4805 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4806 * some of them. else - The peer revoked all ACKED fragments, since
4807 * we had some before and now we have NONE.
4811 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4812 asoc->saw_sack_with_frags = 1;
4813 } else if (asoc->saw_sack_with_frags) {
4814 int cnt_revoked = 0;
4816 /* Peer revoked all dg's marked or acked */
4817 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4818 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4819 tp1->sent = SCTP_DATAGRAM_SENT;
4820 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4821 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4822 tp1->whoTo->flight_size,
4824 (uintptr_t) tp1->whoTo,
4825 tp1->rec.data.TSN_seq);
4827 sctp_flight_size_increase(tp1);
4828 sctp_total_flight_increase(stcb, tp1);
4829 tp1->rec.data.chunk_was_revoked = 1;
4831 * To ensure that this increase in
4832 * flightsize, which is artificial, does not
4833 * throttle the sender, we also increase the
4834 * cwnd artificially.
4836 tp1->whoTo->cwnd += tp1->book_size;
4843 asoc->saw_sack_with_frags = 0;
4846 asoc->saw_sack_with_nr_frags = 1;
4848 asoc->saw_sack_with_nr_frags = 0;
4850 /* JRS - Use the congestion control given in the CC module */
4851 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4853 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4854 /* nothing left in-flight */
4855 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4856 /* stop all timers */
4857 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4858 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4859 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4860 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4861 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4864 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4865 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4866 net->flight_size = 0;
4867 net->partial_bytes_acked = 0;
4869 asoc->total_flight = 0;
4870 asoc->total_flight_count = 0;
4872 /**********************************/
4873 /* Now what about shutdown issues */
4874 /**********************************/
4875 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4876 /* nothing left on sendqueue.. consider done */
4877 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4878 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4879 asoc->peers_rwnd, 0, 0, a_rwnd);
4881 asoc->peers_rwnd = a_rwnd;
4882 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4883 /* SWS sender side engages */
4884 asoc->peers_rwnd = 0;
4887 if ((asoc->stream_queue_cnt == 1) &&
4888 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4889 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4890 (asoc->locked_on_sending)
4892 struct sctp_stream_queue_pending *sp;
4895 * I may be in a state where we got all across.. but
4896 * cannot write more due to a shutdown... we abort
4897 * since the user did not indicate EOR in this case.
4899 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4901 if ((sp) && (sp->length == 0)) {
4902 asoc->locked_on_sending = NULL;
4903 if (sp->msg_is_complete) {
4904 asoc->stream_queue_cnt--;
4906 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4907 asoc->stream_queue_cnt--;
4911 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4912 (asoc->stream_queue_cnt == 0)) {
4913 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4914 /* Need to abort here */
4920 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4921 0, M_DONTWAIT, 1, MT_DATA);
4923 struct sctp_paramhdr *ph;
4926 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4928 ph = mtod(oper, struct sctp_paramhdr *);
4929 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4930 ph->param_length = htons(SCTP_BUF_LEN(oper));
4931 ippp = (uint32_t *) (ph + 1);
4932 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4934 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4935 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4938 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4939 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4940 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4942 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4943 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4944 sctp_stop_timers_for_shutdown(stcb);
4945 sctp_send_shutdown(stcb,
4946 stcb->asoc.primary_destination);
4947 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4948 stcb->sctp_ep, stcb, asoc->primary_destination);
4949 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4950 stcb->sctp_ep, stcb, asoc->primary_destination);
4953 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4954 (asoc->stream_queue_cnt == 0)) {
4955 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4958 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4959 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4960 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4961 sctp_send_shutdown_ack(stcb,
4962 stcb->asoc.primary_destination);
4963 sctp_stop_timers_for_shutdown(stcb);
4964 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4965 stcb->sctp_ep, stcb, asoc->primary_destination);
4970 * Now here we are going to recycle net_ack for a different use...
4973 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4978 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4979 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4980 * automatically ensure that.
4982 if ((asoc->sctp_cmt_on_off > 0) &&
4983 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4984 (cmt_dac_flag == 0)) {
4985 this_sack_lowest_newack = cum_ack;
4987 if ((num_seg > 0) || (num_nr_seg > 0)) {
4988 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4989 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4991 /* JRS - Use the congestion control given in the CC module */
4992 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4994 /******************************************************************
4995 * Here we do the stuff with ECN Nonce checking.
4996 * We basically check to see if the nonce sum flag was incorrect
4997 * or if resynchronization needs to be done. Also if we catch a
4998 * misbehaving receiver we give him the kick.
4999 ******************************************************************/
5001 if (asoc->ecn_nonce_allowed) {
5002 if (asoc->nonce_sum_check) {
5003 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5004 if (asoc->nonce_wait_for_ecne == 0) {
5005 struct sctp_tmit_chunk *lchk;
5007 lchk = TAILQ_FIRST(&asoc->send_queue);
5008 asoc->nonce_wait_for_ecne = 1;
5010 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5012 asoc->nonce_wait_tsn = asoc->sending_seq;
5015 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->nonce_wait_tsn)) {
5017 * Misbehaving peer. We need
5018 * to react to this guy
5020 asoc->ecn_allowed = 0;
5021 asoc->ecn_nonce_allowed = 0;
5026 /* See if Resynchronization Possible */
5027 if (SCTP_TSN_GT(asoc->last_acked_seq, asoc->nonce_resync_tsn)) {
5028 asoc->nonce_sum_check = 1;
5030 * now we must calculate what the base is.
5031 * We do this based on two things, we know
5032 * the total's for all the segments
5033 * gap-acked in the SACK, its stored in
5034 * ecn_seg_sums. We also know the SACK's
5035 * nonce sum, its in nonce_sum_flag. So we
5036 * can build a truth table to back-calculate
5038 * asoc->nonce_sum_expect_base:
5040 * SACK-flag-Value Seg-Sums Base 0 0 0
5044 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5048 /* Now are we exiting loss recovery ? */
5049 if (will_exit_fast_recovery) {
5050 /* Ok, we must exit fast recovery */
5051 asoc->fast_retran_loss_recovery = 0;
5053 if ((asoc->sat_t3_loss_recovery) &&
5054 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5055 /* end satellite t3 loss recovery */
5056 asoc->sat_t3_loss_recovery = 0;
5061 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5062 if (net->will_exit_fast_recovery) {
5063 /* Ok, we must exit fast recovery */
5064 net->fast_retran_loss_recovery = 0;
5068 /* Adjust and set the new rwnd value */
5069 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5070 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5071 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5073 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5074 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5075 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5076 /* SWS sender side engages */
5077 asoc->peers_rwnd = 0;
5079 if (asoc->peers_rwnd > old_rwnd) {
5080 win_probe_recovery = 1;
5083 * Now we must setup so we have a timer up for anyone with
5089 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5090 if (win_probe_recovery && (net->window_probe)) {
5091 win_probe_recovered = 1;
5093 * Find first chunk that was used with
5094 * window probe and clear the event. Put
5095 * it back into the send queue as if has
5098 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5099 if (tp1->window_probe) {
5100 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5105 if (net->flight_size) {
5107 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5108 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5109 stcb->sctp_ep, stcb, net);
5111 if (net->window_probe) {
5112 net->window_probe = 0;
5115 if (net->window_probe) {
5117 * In window probes we must assure a timer
5118 * is still running there
5120 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5121 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5122 stcb->sctp_ep, stcb, net);
5125 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5126 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5128 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5130 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5131 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5132 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5133 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5134 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5140 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5141 (asoc->sent_queue_retran_cnt == 0) &&
5142 (win_probe_recovered == 0) &&
5145 * huh, this should not happen unless all packets are
5146 * PR-SCTP and marked to skip of course.
5148 if (sctp_fs_audit(asoc)) {
5149 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5150 net->flight_size = 0;
5152 asoc->total_flight = 0;
5153 asoc->total_flight_count = 0;
5154 asoc->sent_queue_retran_cnt = 0;
5155 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5156 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5157 sctp_flight_size_increase(tp1);
5158 sctp_total_flight_increase(stcb, tp1);
5159 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5160 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5167 /*********************************************/
5168 /* Here we perform PR-SCTP procedures */
5170 /*********************************************/
5171 /* C1. update advancedPeerAckPoint */
5172 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5173 asoc->advanced_peer_ack_point = cum_ack;
5175 /* C2. try to further move advancedPeerAckPoint ahead */
5176 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5177 struct sctp_tmit_chunk *lchk;
5178 uint32_t old_adv_peer_ack_point;
5180 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5181 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5182 /* C3. See if we need to send a Fwd-TSN */
5183 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5185 * ISSUE with ECN, see FWD-TSN processing for notes
5186 * on issues that will occur when the ECN NONCE
5187 * stuff is put into SCTP for cross checking.
5189 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5190 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5191 0xee, cum_ack, asoc->advanced_peer_ack_point,
5192 old_adv_peer_ack_point);
5194 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5196 send_forward_tsn(stcb, asoc);
5198 * ECN Nonce: Disable Nonce Sum check when
5199 * FWD TSN is sent and store resync tsn
5201 asoc->nonce_sum_check = 0;
5202 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5204 /* try to FR fwd-tsn's that get lost too */
5205 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5206 send_forward_tsn(stcb, asoc);
5211 /* Assure a timer is up */
5212 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5213 stcb->sctp_ep, stcb, lchk->whoTo);
5216 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5217 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5219 stcb->asoc.peers_rwnd,
5220 stcb->asoc.total_flight,
5221 stcb->asoc.total_output_queue_size);
5226 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5227 struct sctp_nets *netp, int *abort_flag)
/*
 * Process the cumulative-TSN-ack field carried by a received SHUTDOWN
 * chunk (cp) by funneling it through the express SACK handler as a
 * pseudo-SACK with zero gap/dup reports.  *abort_flag may be set by the
 * callee if the association must be torn down.
 * NOTE(review): netp is not referenced in the lines visible here --
 * confirm against the full source whether it is used on elided lines.
 */
5230 uint32_t cum_ack, a_rwnd;
/* SHUTDOWN carries the cum-ack in network byte order. */
5232 cum_ack = ntohl(cp->cumulative_tsn_ack);
5233 /* Arrange so a_rwnd does NOT change */
/*
 * Synthesize a_rwnd as (advertised window + bytes in flight) so the
 * express handler's rwnd recomputation leaves peers_rwnd unchanged.
 */
5234 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5236 /* Now call the express sack handling */
5237 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5241 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5242 struct sctp_stream_in *strmin)
/*
 * After a PR-SCTP FWD-TSN has advanced strmin->last_sequence_delivered,
 * flush the stream's reorder queue: first hand every queued chunk whose
 * SSN is <= the new delivery point to the read queue, then resume normal
 * in-order delivery for any consecutive SSNs that follow.
 * NOTE(review): an elided line presumably sets asoc = &stcb->asoc and
 * declares 'tt' -- confirm against the full source.
 */
5244 struct sctp_queued_to_read *ctl, *nctl;
5245 struct sctp_association *asoc;
5249 tt = strmin->last_sequence_delivered;
5251 * First deliver anything prior to and including the stream no that
/* Pass 1: everything at or before the new cumulative SSN is deliverable. */
5254 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5255 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5256 /* this is deliverable now */
5257 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5258 /* subtract pending on streams */
5259 asoc->size_on_all_streams -= ctl->length;
5260 sctp_ucount_decr(asoc->cnt_on_all_streams);
5261 /* deliver it to at least the delivery-q */
5262 if (stcb->sctp_socket) {
/* TSN is consumed; it can no longer be revoked by the peer. */
5263 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5264 sctp_add_to_readq(stcb->sctp_ep, stcb,
5266 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5269 /* no more delivery now. */
5274 * now we must deliver things in queue the normal way if any are
/* Pass 2: keep delivering strictly consecutive SSNs past the new point. */
5277 tt = strmin->last_sequence_delivered + 1;
5278 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5279 if (tt == ctl->sinfo_ssn) {
5280 /* this is deliverable now */
5281 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5282 /* subtract pending on streams */
5283 asoc->size_on_all_streams -= ctl->length;
5284 sctp_ucount_decr(asoc->cnt_on_all_streams);
5285 /* deliver it to at least the delivery-q */
/* Advance the delivery point before handing the chunk up. */
5286 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5287 if (stcb->sctp_socket) {
5288 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5289 sctp_add_to_readq(stcb->sctp_ep, stcb,
5291 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Recompute the next expected SSN for the following iteration. */
5294 tt = strmin->last_sequence_delivered + 1;
5302 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5303 struct sctp_association *asoc,
5304 uint16_t stream, uint16_t seq)
/*
 * Purge from the reassembly queue every ordered fragment belonging to
 * (stream, seq) -- used when a FWD-TSN tells us that message will never
 * complete.  Unordered chunks and chunks on other streams are left
 * alone.  While tossing, per-association PD-API bookkeeping
 * (tsn_last_delivered, str/ssn_of_pdapi, fragment_flags) and the
 * stream's last_sequence_delivered are advanced so later delivery logic
 * stays consistent.
 */
5306 struct sctp_tmit_chunk *chk, *nchk;
5308 /* For each one on here see if we need to toss it */
5310 * For now large messages held on the reasmqueue that are complete
5311 * will be tossed too. We could in theory do more work to spin
5312 * through and stop after dumping one msg aka seeing the start of a
5313 * new msg at the head, and call the delivery function... to see if
5314 * it can be delivered... But for now we just dump everything on the
/* _SAFE variant: entries are removed/freed while walking the list. */
5317 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5319 * Do not toss it if on a different stream or marked for
5320 * unordered delivery in which case the stream sequence
5321 * number has no meaning.
5323 if ((chk->rec.data.stream_number != stream) ||
5324 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5327 if (chk->rec.data.stream_seq == seq) {
5328 /* It needs to be tossed */
5329 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* Track the highest tossed TSN and remember its PD-API identity. */
5330 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5331 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5332 asoc->str_of_pdapi = chk->rec.data.stream_number;
5333 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5334 asoc->fragment_flags = chk->rec.data.rcv_flags;
/* Account for the removed chunk in the reassembly-queue totals. */
5336 asoc->size_on_reasm_queue -= chk->send_size;
5337 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5339 /* Clear up any stream problem */
5340 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5341 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5343 * We must dump forward this streams
5344 * sequence number if the chunk is not
5345 * unordered that is being skipped. There is
5346 * a chance that if the peer does not
5347 * include the last fragment in its FWD-TSN
5348 * we WILL have a problem here since you
5349 * would have a partial chunk in queue that
5350 * may not be deliverable. Also if a Partial
5351 * delivery API as started the user may get
5352 * a partial chunk. The next read returning
5353 * a new chunk... really ugly but I see no
5354 * way around it! Maybe a notify??
5356 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5359 sctp_m_freem(chk->data);
5362 sctp_free_a_chunk(stcb, chk);
/*
 * The reassembly queue is TSN-ordered, so once we see an SSN past the
 * one being purged on this stream there is nothing more to toss.
 */
5363 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5365 * If the stream_seq is > than the purging one, we
5375 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5376 struct sctp_forward_tsn_chunk *fwd,
5377 int *abort_flag, struct mbuf *m, int offset)
5380 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5381 * forward TSN, when the SACK comes back that acknowledges the
5382 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5383 * get quite tricky since we may have sent more data interveneing
5384 * and must carefully account for what the SACK says on the nonce
5385 * and any gaps that are reported. This work will NOT be done here,
5386 * but I note it here since it is really related to PR-SCTP and
5390 /* The pr-sctp fwd tsn */
5392 * here we will perform all the data receiver side steps for
5393 * processing FwdTSN, as required in by pr-sctp draft:
5395 * Assume we get FwdTSN(x):
5397 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5398 * others we have 3) examine and update re-ordering queue on
5399 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5400 * report where we are.
5402 struct sctp_association *asoc;
5403 uint32_t new_cum_tsn, gap;
5404 unsigned int i, fwd_sz, cumack_set_flag, m_size;
5406 struct sctp_stream_in *strm;
5407 struct sctp_tmit_chunk *chk, *nchk;
5408 struct sctp_queued_to_read *ctl, *sv;
5410 cumack_set_flag = 0;
5412 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5413 SCTPDBG(SCTP_DEBUG_INDATA1,
5414 "Bad size too small/big fwd-tsn\n");
5417 m_size = (stcb->asoc.mapping_array_size << 3);
5418 /*************************************************************/
5419 /* 1. Here we update local cumTSN and shift the bitmap array */
5420 /*************************************************************/
5421 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5423 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5424 /* Already got there ... */
5428 * now we know the new TSN is more advanced, let's find the actual
5431 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5432 asoc->cumulative_tsn = new_cum_tsn;
5433 if (gap >= m_size) {
5434 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5438 * out of range (of single byte chunks in the rwnd I
5439 * give out). This must be an attacker.
5442 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5443 0, M_DONTWAIT, 1, MT_DATA);
5445 struct sctp_paramhdr *ph;
5448 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5449 (sizeof(uint32_t) * 3);
5450 ph = mtod(oper, struct sctp_paramhdr *);
5451 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5452 ph->param_length = htons(SCTP_BUF_LEN(oper));
5453 ippp = (uint32_t *) (ph + 1);
5454 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5456 *ippp = asoc->highest_tsn_inside_map;
5458 *ippp = new_cum_tsn;
5460 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5461 sctp_abort_an_association(stcb->sctp_ep, stcb,
5462 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5465 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5467 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5468 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5469 asoc->highest_tsn_inside_map = new_cum_tsn;
5471 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5472 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5475 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5477 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5479 SCTP_TCB_LOCK_ASSERT(stcb);
5480 for (i = 0; i <= gap; i++) {
5481 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5482 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5483 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5484 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5485 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5490 /*************************************************************/
5491 /* 2. Clear up re-assembly queue */
5492 /*************************************************************/
5494 * First service it if pd-api is up, just in case we can progress it
5497 if (asoc->fragmented_delivery_inprogress) {
5498 sctp_service_reassembly(stcb, asoc);
5500 /* For each one on here see if we need to toss it */
5502 * For now large messages held on the reasmqueue that are complete
5503 * will be tossed too. We could in theory do more work to spin
5504 * through and stop after dumping one msg aka seeing the start of a
5505 * new msg at the head, and call the delivery function... to see if
5506 * it can be delivered... But for now we just dump everything on the
5509 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5510 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5511 /* It needs to be tossed */
5512 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5513 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5514 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5515 asoc->str_of_pdapi = chk->rec.data.stream_number;
5516 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5517 asoc->fragment_flags = chk->rec.data.rcv_flags;
5519 asoc->size_on_reasm_queue -= chk->send_size;
5520 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5522 /* Clear up any stream problem */
5523 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5524 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5526 * We must dump forward this streams
5527 * sequence number if the chunk is not
5528 * unordered that is being skipped. There is
5529 * a chance that if the peer does not
5530 * include the last fragment in its FWD-TSN
5531 * we WILL have a problem here since you
5532 * would have a partial chunk in queue that
5533 * may not be deliverable. Also if a Partial
5534 * delivery API as started the user may get
5535 * a partial chunk. The next read returning
5536 * a new chunk... really ugly but I see no
5537 * way around it! Maybe a notify??
5539 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5542 sctp_m_freem(chk->data);
5545 sctp_free_a_chunk(stcb, chk);
5548 * Ok we have gone beyond the end of the fwd-tsn's
5554 /*******************************************************/
5555 /* 3. Update the PR-stream re-ordering queues and fix */
5556 /* delivery issues as needed. */
5557 /*******************************************************/
5558 fwd_sz -= sizeof(*fwd);
5561 unsigned int num_str;
5562 struct sctp_strseq *stseq, strseqbuf;
5564 offset += sizeof(*fwd);
5566 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5567 num_str = fwd_sz / sizeof(struct sctp_strseq);
5568 for (i = 0; i < num_str; i++) {
5571 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5572 sizeof(struct sctp_strseq),
5573 (uint8_t *) & strseqbuf);
5574 offset += sizeof(struct sctp_strseq);
5575 if (stseq == NULL) {
5579 st = ntohs(stseq->stream);
5581 st = ntohs(stseq->sequence);
5582 stseq->sequence = st;
5587 * Ok we now look for the stream/seq on the read
5588 * queue where its not all delivered. If we find it
5589 * we transmute the read entry into a PDI_ABORTED.
5591 if (stseq->stream >= asoc->streamincnt) {
5592 /* screwed up streams, stop! */
5595 if ((asoc->str_of_pdapi == stseq->stream) &&
5596 (asoc->ssn_of_pdapi == stseq->sequence)) {
5598 * If this is the one we were partially
5599 * delivering now then we no longer are.
5600 * Note this will change with the reassembly
5603 asoc->fragmented_delivery_inprogress = 0;
5605 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5606 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5607 if ((ctl->sinfo_stream == stseq->stream) &&
5608 (ctl->sinfo_ssn == stseq->sequence)) {
5609 str_seq = (stseq->stream << 16) | stseq->sequence;
5611 ctl->pdapi_aborted = 1;
5612 sv = stcb->asoc.control_pdapi;
5613 stcb->asoc.control_pdapi = ctl;
5614 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5616 SCTP_PARTIAL_DELIVERY_ABORTED,
5618 SCTP_SO_NOT_LOCKED);
5619 stcb->asoc.control_pdapi = sv;
5621 } else if ((ctl->sinfo_stream == stseq->stream) &&
5622 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5623 /* We are past our victim SSN */
5627 strm = &asoc->strmin[stseq->stream];
5628 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5629 /* Update the sequence number */
5630 strm->last_sequence_delivered = stseq->sequence;
5632 /* now kick the stream the new way */
5633 /* sa_ignore NO_NULL_CHK */
5634 sctp_kick_prsctp_reorder_queue(stcb, strm);
5636 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5639 * Now slide thing forward.
5641 sctp_slide_mapping_arrays(stcb);
5643 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5644 /* now lets kick out and check for more fragmented delivery */
5645 /* sa_ignore NO_NULL_CHK */
5646 sctp_deliver_reasm_check(stcb, &stcb->asoc);