2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
/*
 * Compute the bit offset ("gap") of tsn relative to mapping_tsn, using
 * serial-number arithmetic so that a TSN that wrapped past MAX_TSN still
 * yields the correct distance.
 * NOTE(review): reconstructed from a line-mangled extract — the else arm
 * and macro tail were missing; verify against the original sctp_indata.c.
 */
#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
	if ((compare_with_wrap(tsn, mapping_tsn, MAX_TSN)) || \
	    (tsn == mapping_tsn)) { \
		/* tsn is at or ahead of the base: plain difference */ \
		gap = tsn - mapping_tsn; \
	} else { \
		/* tsn wrapped around MAX_TSN relative to the base */ \
		gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
	} \
} while (0)
/*
 * EY - When a TSN has been tagged in the nr (non-renegable) mapping array,
 * clear its "present" bit in the ordinary (renegable) mapping array.  If
 * both arrays share the same base TSN, nr_gap is valid for both; otherwise
 * the gap must be recomputed against mapping_array_base_tsn.
 * NOTE(review): reconstructed from a line-mangled extract — the else arm,
 * the local gap declaration, and the macro tail were missing; verify
 * against the original sctp_indata.c.
 */
#define SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc) do { \
	if (asoc->mapping_array_base_tsn == asoc->nr_mapping_array_base_tsn) { \
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, nr_gap); \
	} else { \
		uint32_t lgap; \
		SCTP_CALC_TSN_TO_GAP(lgap, tsn, asoc->mapping_array_base_tsn); \
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, lgap); \
	} \
} while (0)
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send that is and will be sending it .. for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
78 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
80 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
83 /* Calculate what the rwnd would be */
85 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
90 * This is really set wrong with respect to a 1-2-m socket. Since
91 * the sb_cc is the count that everyone as put up. When we re-write
92 * sctp_soreceive then we will fix this so that ONLY this
93 * associations data is taken into account.
95 if (stcb->sctp_socket == NULL)
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->size_on_reasm_queue == 0 &&
100 asoc->size_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
109 * take out what has NOT been put on socket queue and we yet hold
112 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
113 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
119 /* what is the overhead of all these rwnd's */
120 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 * If the window gets too small due to ctrl-stuff, reduce it to 1,
123 * even it is 0. SWS engaged
125 if (calc < stcb->asoc.my_rwnd_control_len) {
134 * Build out our readq entry based on the incoming packet.
136 struct sctp_queued_to_read *
137 sctp_build_readq_entry(struct sctp_tcb *stcb,
138 struct sctp_nets *net,
139 uint32_t tsn, uint32_t ppid,
140 uint32_t context, uint16_t stream_no,
141 uint16_t stream_seq, uint8_t flags,
144 struct sctp_queued_to_read *read_queue_e = NULL;
146 sctp_alloc_a_readq(stcb, read_queue_e);
147 if (read_queue_e == NULL) {
150 read_queue_e->sinfo_stream = stream_no;
151 read_queue_e->sinfo_ssn = stream_seq;
152 read_queue_e->sinfo_flags = (flags << 8);
153 read_queue_e->sinfo_ppid = ppid;
154 read_queue_e->sinfo_context = stcb->asoc.context;
155 read_queue_e->sinfo_timetolive = 0;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->whoFrom = net;
160 read_queue_e->length = 0;
161 atomic_add_int(&net->ref_count, 1);
162 read_queue_e->data = dm;
163 read_queue_e->spec_flags = 0;
164 read_queue_e->tail_mbuf = NULL;
165 read_queue_e->aux_data = NULL;
166 read_queue_e->stcb = stcb;
167 read_queue_e->port_from = stcb->rport;
168 read_queue_e->do_not_ref_stcb = 0;
169 read_queue_e->end_added = 0;
170 read_queue_e->some_taken = 0;
171 read_queue_e->pdapi_aborted = 0;
173 return (read_queue_e);
178 * Build out our readq entry based on the incoming packet.
180 static struct sctp_queued_to_read *
181 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
182 struct sctp_tmit_chunk *chk)
184 struct sctp_queued_to_read *read_queue_e = NULL;
186 sctp_alloc_a_readq(stcb, read_queue_e);
187 if (read_queue_e == NULL) {
190 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
191 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
192 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
193 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
194 read_queue_e->sinfo_context = stcb->asoc.context;
195 read_queue_e->sinfo_timetolive = 0;
196 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
197 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
198 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
199 read_queue_e->whoFrom = chk->whoTo;
200 read_queue_e->aux_data = NULL;
201 read_queue_e->length = 0;
202 atomic_add_int(&chk->whoTo->ref_count, 1);
203 read_queue_e->data = chk->data;
204 read_queue_e->tail_mbuf = NULL;
205 read_queue_e->stcb = stcb;
206 read_queue_e->port_from = stcb->rport;
207 read_queue_e->spec_flags = 0;
208 read_queue_e->do_not_ref_stcb = 0;
209 read_queue_e->end_added = 0;
210 read_queue_e->some_taken = 0;
211 read_queue_e->pdapi_aborted = 0;
213 return (read_queue_e);
218 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
219 struct sctp_sndrcvinfo *sinfo)
221 struct sctp_sndrcvinfo *outinfo;
225 int use_extended = 0;
227 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
228 /* user does not want the sndrcv ctl */
231 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
233 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
235 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
239 ret = sctp_get_mbuf_for_msg(len,
240 0, M_DONTWAIT, 1, MT_DATA);
246 /* We need a CMSG header followed by the struct */
247 cmh = mtod(ret, struct cmsghdr *);
248 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
249 cmh->cmsg_level = IPPROTO_SCTP;
251 cmh->cmsg_type = SCTP_EXTRCV;
253 memcpy(outinfo, sinfo, len);
255 cmh->cmsg_type = SCTP_SNDRCV;
259 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
265 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
267 struct sctp_sndrcvinfo *sinfo)
269 struct sctp_sndrcvinfo *outinfo;
273 int use_extended = 0;
275 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
276 /* user does not want the sndrcv ctl */
279 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
281 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
283 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
285 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
290 /* We need a CMSG header followed by the struct */
291 cmh = (struct cmsghdr *)buf;
292 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
293 cmh->cmsg_level = IPPROTO_SCTP;
295 cmh->cmsg_type = SCTP_EXTRCV;
297 memcpy(outinfo, sinfo, len);
299 cmh->cmsg_type = SCTP_SNDRCV;
309 * We are delivering currently from the reassembly queue. We must continue to
310 * deliver until we either: 1) run out of space. 2) run out of sequential
311 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
314 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
316 struct sctp_tmit_chunk *chk;
322 /* EY if any out-of-order delivered, then tag it nr on nr_map */
323 uint32_t nr_tsn, nr_gap;
325 struct sctp_queued_to_read *control, *ctl, *ctlat;
330 cntDel = stream_no = 0;
331 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
332 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
333 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
334 /* socket above is long gone or going.. */
336 asoc->fragmented_delivery_inprogress = 0;
337 chk = TAILQ_FIRST(&asoc->reasmqueue);
339 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
340 asoc->size_on_reasm_queue -= chk->send_size;
341 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
343 * Lose the data pointer, since its in the socket
347 sctp_m_freem(chk->data);
350 /* Now free the address and data */
351 sctp_free_a_chunk(stcb, chk);
352 /* sa_ignore FREED_MEMORY */
353 chk = TAILQ_FIRST(&asoc->reasmqueue);
357 SCTP_TCB_LOCK_ASSERT(stcb);
359 chk = TAILQ_FIRST(&asoc->reasmqueue);
363 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
364 /* Can't deliver more :< */
367 stream_no = chk->rec.data.stream_number;
368 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
369 if (nxt_todel != chk->rec.data.stream_seq &&
370 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
372 * Not the next sequence to deliver in its stream OR
377 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
379 control = sctp_build_readq_entry_chk(stcb, chk);
380 if (control == NULL) {
384 /* save it off for our future deliveries */
385 stcb->asoc.control_pdapi = control;
386 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
390 sctp_add_to_readq(stcb->sctp_ep,
391 stcb, control, &stcb->sctp_socket->so_rcv, end,
392 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
395 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
399 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
400 stcb->asoc.control_pdapi,
401 chk->data, end, chk->rec.data.TSN_seq,
402 &stcb->sctp_socket->so_rcv)) {
404 * something is very wrong, either
405 * control_pdapi is NULL, or the tail_mbuf
406 * is corrupt, or there is a EOM already on
409 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
413 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
414 panic("This should not happen control_pdapi NULL?");
416 /* if we did not panic, it was a EOM */
417 panic("Bad chunking ??");
419 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
420 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
422 SCTP_PRINTF("Bad chunking ??\n");
423 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
431 /* pull it we did it */
432 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
434 * EY this is the chunk that should be tagged nr gapped
435 * calculate the gap and such then tag this TSN nr
436 * chk->rec.data.TSN_seq
439 * EY!-TODO- this tsn should be tagged nr only if it is
440 * out-of-order, the if statement should be modified
442 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
444 nr_tsn = chk->rec.data.TSN_seq;
445 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
446 if ((nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3)) ||
447 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
449 * EY The 1st should never happen, as in
450 * process_a_data_chunk method this check
454 * EY The 2nd should never happen, because
455 * nr_mapping_array is always expanded when
456 * mapping_array is expanded
458 printf("Impossible nr_gap ack range failed\n");
460 SCTP_TCB_LOCK_ASSERT(stcb);
461 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
462 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
463 if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
464 asoc->highest_tsn_inside_nr_map = nr_tsn;
467 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
468 asoc->fragmented_delivery_inprogress = 0;
469 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
470 asoc->strmin[stream_no].last_sequence_delivered++;
472 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
473 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
475 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
477 * turn the flag back on since we just delivered
480 asoc->fragmented_delivery_inprogress = 1;
482 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
483 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
484 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
485 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
487 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
488 asoc->size_on_reasm_queue -= chk->send_size;
489 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
490 /* free up the chk */
492 sctp_free_a_chunk(stcb, chk);
494 if (asoc->fragmented_delivery_inprogress == 0) {
496 * Now lets see if we can deliver the next one on
499 struct sctp_stream_in *strm;
501 strm = &asoc->strmin[stream_no];
502 nxt_todel = strm->last_sequence_delivered + 1;
503 ctl = TAILQ_FIRST(&strm->inqueue);
504 if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
505 while (ctl != NULL) {
506 /* Deliver more if we can. */
507 if (nxt_todel == ctl->sinfo_ssn) {
508 ctlat = TAILQ_NEXT(ctl, next);
509 TAILQ_REMOVE(&strm->inqueue, ctl, next);
510 asoc->size_on_all_streams -= ctl->length;
511 sctp_ucount_decr(asoc->cnt_on_all_streams);
512 strm->last_sequence_delivered++;
517 nr_tsn = ctl->sinfo_tsn;
518 sctp_add_to_readq(stcb->sctp_ep, stcb,
520 &stcb->sctp_socket->so_rcv, 1,
521 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
523 * EY -now something is
524 * delivered, calculate
525 * nr_gap and tag this tsn
528 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
529 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
530 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
531 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
532 printf("Impossible NR gap calculation?\n");
565 SCTP_TCB_LOCK_ASSERT(stcb);
566 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
567 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
568 if (compare_with_wrap(nr_tsn,
569 asoc->highest_tsn_inside_nr_map,
571 asoc->highest_tsn_inside_nr_map = nr_tsn;
578 nxt_todel = strm->last_sequence_delivered + 1;
583 /* sa_ignore FREED_MEMORY */
584 chk = TAILQ_FIRST(&asoc->reasmqueue);
589 * Queue the chunk either right into the socket buffer if it is the next one
590 * to go OR put it in the correct place in the delivery queue. If we do
591 * append to the so_buf, keep doing so until we are out of order. One big
592 * question still remains, what to do when the socket buffer is FULL??
595 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
596 struct sctp_queued_to_read *control, int *abort_flag)
599 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
600 * all the data in one stream this could happen quite rapidly. One
601 * could use the TSN to keep track of things, but this scheme breaks
602 * down in the other type of stream useage that could occur. Send a
603 * single msg to stream 0, send 4Billion messages to stream 1, now
604 * send a message to stream 0. You have a situation where the TSN
605 * has wrapped but not in the stream. Is this worth worrying about
606 * or should we just change our queue sort at the bottom to be by
609 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
610 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
611 * assignment this could happen... and I don't see how this would be
612 * a violation. So for now I am undecided an will leave the sort by
613 * SSN alone. Maybe a hybred approach is the answer
616 struct sctp_stream_in *strm;
617 struct sctp_queued_to_read *at;
622 /* EY- will be used to calculate nr-gap for a tsn */
623 uint32_t nr_tsn, nr_gap;
626 asoc->size_on_all_streams += control->length;
627 sctp_ucount_incr(asoc->cnt_on_all_streams);
628 strm = &asoc->strmin[control->sinfo_stream];
629 nxt_todel = strm->last_sequence_delivered + 1;
630 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
631 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
633 SCTPDBG(SCTP_DEBUG_INDATA1,
634 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
635 (uint32_t) control->sinfo_stream,
636 (uint32_t) strm->last_sequence_delivered,
637 (uint32_t) nxt_todel);
638 if (compare_with_wrap(strm->last_sequence_delivered,
639 control->sinfo_ssn, MAX_SEQ) ||
640 (strm->last_sequence_delivered == control->sinfo_ssn)) {
641 /* The incoming sseq is behind where we last delivered? */
642 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
643 control->sinfo_ssn, strm->last_sequence_delivered);
646 * throw it in the stream so it gets cleaned up in
647 * association destruction
649 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
650 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
651 0, M_DONTWAIT, 1, MT_DATA);
653 struct sctp_paramhdr *ph;
656 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
657 (sizeof(uint32_t) * 3);
658 ph = mtod(oper, struct sctp_paramhdr *);
659 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
660 ph->param_length = htons(SCTP_BUF_LEN(oper));
661 ippp = (uint32_t *) (ph + 1);
662 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
664 *ippp = control->sinfo_tsn;
666 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
668 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
669 sctp_abort_an_association(stcb->sctp_ep, stcb,
670 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
676 if (nxt_todel == control->sinfo_ssn) {
677 /* can be delivered right away? */
678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
679 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
681 /* EY it wont be queued if it could be delivered directly */
683 asoc->size_on_all_streams -= control->length;
684 sctp_ucount_decr(asoc->cnt_on_all_streams);
685 strm->last_sequence_delivered++;
686 /* EY will be used to calculate nr-gap */
687 nr_tsn = control->sinfo_tsn;
688 sctp_add_to_readq(stcb->sctp_ep, stcb,
690 &stcb->sctp_socket->so_rcv, 1,
691 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
693 * EY this is the chunk that should be tagged nr gapped
694 * calculate the gap and such then tag this TSN nr
695 * chk->rec.data.TSN_seq
697 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
698 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
699 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
700 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
701 printf("Impossible nr_tsn set 2?\n");
703 * EY The 1st should never happen, as in
704 * process_a_data_chunk method this check
708 * EY The 2nd should never happen, because
709 * nr_mapping_array is always expanded when
710 * mapping_array is expanded
713 SCTP_TCB_LOCK_ASSERT(stcb);
714 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
715 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
716 if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
717 asoc->highest_tsn_inside_nr_map = nr_tsn;
720 control = TAILQ_FIRST(&strm->inqueue);
721 while (control != NULL) {
723 nxt_todel = strm->last_sequence_delivered + 1;
724 if (nxt_todel == control->sinfo_ssn) {
725 at = TAILQ_NEXT(control, next);
726 TAILQ_REMOVE(&strm->inqueue, control, next);
727 asoc->size_on_all_streams -= control->length;
728 sctp_ucount_decr(asoc->cnt_on_all_streams);
729 strm->last_sequence_delivered++;
731 * We ignore the return of deliver_data here
732 * since we always can hold the chunk on the
733 * d-queue. And we have a finite number that
734 * can be delivered from the strq.
736 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
737 sctp_log_strm_del(control, NULL,
738 SCTP_STR_LOG_FROM_IMMED_DEL);
740 /* EY will be used to calculate nr-gap */
741 nr_tsn = control->sinfo_tsn;
742 sctp_add_to_readq(stcb->sctp_ep, stcb,
744 &stcb->sctp_socket->so_rcv, 1,
745 SCTP_READ_LOCK_NOT_HELD,
748 * EY this is the chunk that should be
749 * tagged nr gapped calculate the gap and
750 * such then tag this TSN nr
751 * chk->rec.data.TSN_seq
753 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
754 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
755 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
756 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
757 printf("Impossible nr TSN set 3?\n");
759 * EY The 1st should never
761 * process_a_data_chunk
762 * method this check should
766 * EY The 2nd should never
768 * nr_mapping_array is
769 * always expanded when
770 * mapping_array is expanded
773 SCTP_TCB_LOCK_ASSERT(stcb);
774 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
775 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
776 if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
778 asoc->highest_tsn_inside_nr_map = nr_tsn;
789 * Ok, we did not deliver this guy, find the correct place
790 * to put it on the queue.
792 if ((compare_with_wrap(asoc->cumulative_tsn,
793 control->sinfo_tsn, MAX_TSN)) ||
794 (control->sinfo_tsn == asoc->cumulative_tsn)) {
797 if (TAILQ_EMPTY(&strm->inqueue)) {
799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
800 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
802 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
804 TAILQ_FOREACH(at, &strm->inqueue, next) {
805 if (compare_with_wrap(at->sinfo_ssn,
806 control->sinfo_ssn, MAX_SEQ)) {
808 * one in queue is bigger than the
809 * new one, insert before this one
811 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
812 sctp_log_strm_del(control, at,
813 SCTP_STR_LOG_FROM_INSERT_MD);
815 TAILQ_INSERT_BEFORE(at, control, next);
817 } else if (at->sinfo_ssn == control->sinfo_ssn) {
819 * Gak, He sent me a duplicate str
823 * foo bar, I guess I will just free
824 * this new guy, should we abort
825 * too? FIX ME MAYBE? Or it COULD be
826 * that the SSN's have wrapped.
827 * Maybe I should compare to TSN
828 * somehow... sigh for now just blow
833 sctp_m_freem(control->data);
834 control->data = NULL;
835 asoc->size_on_all_streams -= control->length;
836 sctp_ucount_decr(asoc->cnt_on_all_streams);
837 if (control->whoFrom)
838 sctp_free_remote_addr(control->whoFrom);
839 control->whoFrom = NULL;
840 sctp_free_a_readq(stcb, control);
843 if (TAILQ_NEXT(at, next) == NULL) {
845 * We are at the end, insert
848 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
849 sctp_log_strm_del(control, at,
850 SCTP_STR_LOG_FROM_INSERT_TL);
852 TAILQ_INSERT_AFTER(&strm->inqueue,
863 * Returns two things: You get the total size of the deliverable parts of the
864 * first fragmented message on the reassembly queue. And you get a 1 back if
865 * all of the message is ready or a 0 back if the message is still incomplete
868 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
870 struct sctp_tmit_chunk *chk;
874 chk = TAILQ_FIRST(&asoc->reasmqueue);
876 /* nothing on the queue */
879 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
880 /* Not a first on the queue */
883 tsn = chk->rec.data.TSN_seq;
885 if (tsn != chk->rec.data.TSN_seq) {
888 *t_size += chk->send_size;
889 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
893 chk = TAILQ_NEXT(chk, sctp_next);
899 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
901 struct sctp_tmit_chunk *chk;
903 uint32_t tsize, pd_point;
906 chk = TAILQ_FIRST(&asoc->reasmqueue);
909 asoc->size_on_reasm_queue = 0;
910 asoc->cnt_on_reasm_queue = 0;
913 if (asoc->fragmented_delivery_inprogress == 0) {
915 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
916 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
917 (nxt_todel == chk->rec.data.stream_seq ||
918 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
920 * Yep the first one is here and its ok to deliver
923 if (stcb->sctp_socket) {
924 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
925 stcb->sctp_ep->partial_delivery_point);
927 pd_point = stcb->sctp_ep->partial_delivery_point;
929 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
932 * Yes, we setup to start reception, by
933 * backing down the TSN just in case we
934 * can't deliver. If we
936 asoc->fragmented_delivery_inprogress = 1;
937 asoc->tsn_last_delivered =
938 chk->rec.data.TSN_seq - 1;
940 chk->rec.data.stream_number;
941 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
942 asoc->pdapi_ppid = chk->rec.data.payloadtype;
943 asoc->fragment_flags = chk->rec.data.rcv_flags;
944 sctp_service_reassembly(stcb, asoc);
949 * Service re-assembly will deliver stream data queued at
950 * the end of fragmented delivery.. but it wont know to go
951 * back and call itself again... we do that here with the
954 sctp_service_reassembly(stcb, asoc);
955 if (asoc->fragmented_delivery_inprogress == 0) {
957 * finished our Fragmented delivery, could be more
966 * Dump onto the re-assembly queue, in its proper place. After dumping on the
967 * queue, see if anthing can be delivered. If so pull it off (or as much as
968 * we can. If we run out of space then we must dump what we can and set the
969 * appropriate flag to say we queued what we could.
972 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
973 struct sctp_tmit_chunk *chk, int *abort_flag)
976 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
978 struct sctp_tmit_chunk *at, *prev, *next;
981 cum_ackp1 = asoc->tsn_last_delivered + 1;
982 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
983 /* This is the first one on the queue */
984 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
986 * we do not check for delivery of anything when only one
989 asoc->size_on_reasm_queue = chk->send_size;
990 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
991 if (chk->rec.data.TSN_seq == cum_ackp1) {
992 if (asoc->fragmented_delivery_inprogress == 0 &&
993 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
994 SCTP_DATA_FIRST_FRAG) {
996 * An empty queue, no delivery inprogress,
997 * we hit the next one and it does NOT have
998 * a FIRST fragment mark.
1000 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
1001 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1002 0, M_DONTWAIT, 1, MT_DATA);
1005 struct sctp_paramhdr *ph;
1008 SCTP_BUF_LEN(oper) =
1009 sizeof(struct sctp_paramhdr) +
1010 (sizeof(uint32_t) * 3);
1011 ph = mtod(oper, struct sctp_paramhdr *);
1013 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1014 ph->param_length = htons(SCTP_BUF_LEN(oper));
1015 ippp = (uint32_t *) (ph + 1);
1016 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
1018 *ippp = chk->rec.data.TSN_seq;
1020 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1023 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
1024 sctp_abort_an_association(stcb->sctp_ep, stcb,
1025 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1027 } else if (asoc->fragmented_delivery_inprogress &&
1028 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1030 * We are doing a partial delivery and the
1031 * NEXT chunk MUST be either the LAST or
1032 * MIDDLE fragment NOT a FIRST
1034 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
1035 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1036 0, M_DONTWAIT, 1, MT_DATA);
1038 struct sctp_paramhdr *ph;
1041 SCTP_BUF_LEN(oper) =
1042 sizeof(struct sctp_paramhdr) +
1043 (3 * sizeof(uint32_t));
1044 ph = mtod(oper, struct sctp_paramhdr *);
1046 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1047 ph->param_length = htons(SCTP_BUF_LEN(oper));
1048 ippp = (uint32_t *) (ph + 1);
1049 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
1051 *ippp = chk->rec.data.TSN_seq;
1053 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1055 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
1056 sctp_abort_an_association(stcb->sctp_ep, stcb,
1057 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1059 } else if (asoc->fragmented_delivery_inprogress) {
1061 * Here we are ok with a MIDDLE or LAST
1064 if (chk->rec.data.stream_number !=
1065 asoc->str_of_pdapi) {
1066 /* Got to be the right STR No */
1067 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
1068 chk->rec.data.stream_number,
1069 asoc->str_of_pdapi);
1070 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1071 0, M_DONTWAIT, 1, MT_DATA);
1073 struct sctp_paramhdr *ph;
1076 SCTP_BUF_LEN(oper) =
1077 sizeof(struct sctp_paramhdr) +
1078 (sizeof(uint32_t) * 3);
1080 struct sctp_paramhdr *);
1082 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1084 htons(SCTP_BUF_LEN(oper));
1085 ippp = (uint32_t *) (ph + 1);
1086 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
1088 *ippp = chk->rec.data.TSN_seq;
1090 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1092 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
1093 sctp_abort_an_association(stcb->sctp_ep,
1094 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1096 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
1097 SCTP_DATA_UNORDERED &&
1098 chk->rec.data.stream_seq !=
1099 asoc->ssn_of_pdapi) {
1100 /* Got to be the right STR Seq */
1101 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
1102 chk->rec.data.stream_seq,
1103 asoc->ssn_of_pdapi);
1104 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1105 0, M_DONTWAIT, 1, MT_DATA);
1107 struct sctp_paramhdr *ph;
1110 SCTP_BUF_LEN(oper) =
1111 sizeof(struct sctp_paramhdr) +
1112 (3 * sizeof(uint32_t));
1114 struct sctp_paramhdr *);
1116 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1118 htons(SCTP_BUF_LEN(oper));
1119 ippp = (uint32_t *) (ph + 1);
1120 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1122 *ippp = chk->rec.data.TSN_seq;
1124 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1127 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1128 sctp_abort_an_association(stcb->sctp_ep,
1129 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1136 /* Find its place */
1137 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1138 if (compare_with_wrap(at->rec.data.TSN_seq,
1139 chk->rec.data.TSN_seq, MAX_TSN)) {
1141 * one in queue is bigger than the new one, insert
1145 asoc->size_on_reasm_queue += chk->send_size;
1146 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1148 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1150 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1151 /* Gak, He sent me a duplicate str seq number */
1153 * foo bar, I guess I will just free this new guy,
1154 * should we abort too? FIX ME MAYBE? Or it COULD be
1155 * that the SSN's have wrapped. Maybe I should
1156 * compare to TSN somehow... sigh for now just blow
1160 sctp_m_freem(chk->data);
1163 sctp_free_a_chunk(stcb, chk);
1166 last_flags = at->rec.data.rcv_flags;
1167 last_tsn = at->rec.data.TSN_seq;
1169 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1171 * We are at the end, insert it after this
1174 /* check it first */
1175 asoc->size_on_reasm_queue += chk->send_size;
1176 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1177 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1182 /* Now the audits */
1184 prev_tsn = chk->rec.data.TSN_seq - 1;
1185 if (prev_tsn == prev->rec.data.TSN_seq) {
1187 * Ok the one I am dropping onto the end is the
1188 * NEXT. A bit of valdiation here.
1190 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1191 SCTP_DATA_FIRST_FRAG ||
1192 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1193 SCTP_DATA_MIDDLE_FRAG) {
1195 * Insert chk MUST be a MIDDLE or LAST
1198 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1199 SCTP_DATA_FIRST_FRAG) {
1200 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1201 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1202 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1203 0, M_DONTWAIT, 1, MT_DATA);
1205 struct sctp_paramhdr *ph;
1208 SCTP_BUF_LEN(oper) =
1209 sizeof(struct sctp_paramhdr) +
1210 (3 * sizeof(uint32_t));
1212 struct sctp_paramhdr *);
1214 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1216 htons(SCTP_BUF_LEN(oper));
1217 ippp = (uint32_t *) (ph + 1);
1218 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1220 *ippp = chk->rec.data.TSN_seq;
1222 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1225 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1226 sctp_abort_an_association(stcb->sctp_ep,
1227 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1231 if (chk->rec.data.stream_number !=
1232 prev->rec.data.stream_number) {
1234 * Huh, need the correct STR here,
1235 * they must be the same.
1237 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1238 chk->rec.data.stream_number,
1239 prev->rec.data.stream_number);
1240 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1241 0, M_DONTWAIT, 1, MT_DATA);
1243 struct sctp_paramhdr *ph;
1246 SCTP_BUF_LEN(oper) =
1247 sizeof(struct sctp_paramhdr) +
1248 (3 * sizeof(uint32_t));
1250 struct sctp_paramhdr *);
1252 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1254 htons(SCTP_BUF_LEN(oper));
1255 ippp = (uint32_t *) (ph + 1);
1256 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1258 *ippp = chk->rec.data.TSN_seq;
1260 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1262 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1263 sctp_abort_an_association(stcb->sctp_ep,
1264 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1269 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1270 chk->rec.data.stream_seq !=
1271 prev->rec.data.stream_seq) {
1273 * Huh, need the correct STR here,
1274 * they must be the same.
1276 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1277 chk->rec.data.stream_seq,
1278 prev->rec.data.stream_seq);
1279 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1280 0, M_DONTWAIT, 1, MT_DATA);
1282 struct sctp_paramhdr *ph;
1285 SCTP_BUF_LEN(oper) =
1286 sizeof(struct sctp_paramhdr) +
1287 (3 * sizeof(uint32_t));
1289 struct sctp_paramhdr *);
1291 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1293 htons(SCTP_BUF_LEN(oper));
1294 ippp = (uint32_t *) (ph + 1);
1295 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1297 *ippp = chk->rec.data.TSN_seq;
1299 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1301 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1302 sctp_abort_an_association(stcb->sctp_ep,
1303 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1308 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1309 SCTP_DATA_LAST_FRAG) {
1310 /* Insert chk MUST be a FIRST */
1311 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1312 SCTP_DATA_FIRST_FRAG) {
1313 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1314 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1315 0, M_DONTWAIT, 1, MT_DATA);
1317 struct sctp_paramhdr *ph;
1320 SCTP_BUF_LEN(oper) =
1321 sizeof(struct sctp_paramhdr) +
1322 (3 * sizeof(uint32_t));
1324 struct sctp_paramhdr *);
1326 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1328 htons(SCTP_BUF_LEN(oper));
1329 ippp = (uint32_t *) (ph + 1);
1330 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1332 *ippp = chk->rec.data.TSN_seq;
1334 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1337 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1338 sctp_abort_an_association(stcb->sctp_ep,
1339 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1348 post_tsn = chk->rec.data.TSN_seq + 1;
1349 if (post_tsn == next->rec.data.TSN_seq) {
1351 * Ok the one I am inserting ahead of is my NEXT
1352 * one. A bit of valdiation here.
1354 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1355 /* Insert chk MUST be a last fragment */
1356 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1357 != SCTP_DATA_LAST_FRAG) {
1358 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1359 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1360 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1361 0, M_DONTWAIT, 1, MT_DATA);
1363 struct sctp_paramhdr *ph;
1366 SCTP_BUF_LEN(oper) =
1367 sizeof(struct sctp_paramhdr) +
1368 (3 * sizeof(uint32_t));
1370 struct sctp_paramhdr *);
1372 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1374 htons(SCTP_BUF_LEN(oper));
1375 ippp = (uint32_t *) (ph + 1);
1376 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1378 *ippp = chk->rec.data.TSN_seq;
1380 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1382 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1383 sctp_abort_an_association(stcb->sctp_ep,
1384 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1389 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1390 SCTP_DATA_MIDDLE_FRAG ||
1391 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1392 SCTP_DATA_LAST_FRAG) {
1394 * Insert chk CAN be MIDDLE or FIRST NOT
1397 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1398 SCTP_DATA_LAST_FRAG) {
1399 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1400 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1401 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1402 0, M_DONTWAIT, 1, MT_DATA);
1404 struct sctp_paramhdr *ph;
1407 SCTP_BUF_LEN(oper) =
1408 sizeof(struct sctp_paramhdr) +
1409 (3 * sizeof(uint32_t));
1411 struct sctp_paramhdr *);
1413 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1415 htons(SCTP_BUF_LEN(oper));
1416 ippp = (uint32_t *) (ph + 1);
1417 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1419 *ippp = chk->rec.data.TSN_seq;
1421 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1424 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1425 sctp_abort_an_association(stcb->sctp_ep,
1426 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1431 if (chk->rec.data.stream_number !=
1432 next->rec.data.stream_number) {
1434 * Huh, need the correct STR here,
1435 * they must be the same.
1437 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1438 chk->rec.data.stream_number,
1439 next->rec.data.stream_number);
1440 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1441 0, M_DONTWAIT, 1, MT_DATA);
1443 struct sctp_paramhdr *ph;
1446 SCTP_BUF_LEN(oper) =
1447 sizeof(struct sctp_paramhdr) +
1448 (3 * sizeof(uint32_t));
1450 struct sctp_paramhdr *);
1452 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1454 htons(SCTP_BUF_LEN(oper));
1455 ippp = (uint32_t *) (ph + 1);
1456 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1458 *ippp = chk->rec.data.TSN_seq;
1460 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1463 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1464 sctp_abort_an_association(stcb->sctp_ep,
1465 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1470 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1471 chk->rec.data.stream_seq !=
1472 next->rec.data.stream_seq) {
1474 * Huh, need the correct STR here,
1475 * they must be the same.
1477 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1478 chk->rec.data.stream_seq,
1479 next->rec.data.stream_seq);
1480 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1481 0, M_DONTWAIT, 1, MT_DATA);
1483 struct sctp_paramhdr *ph;
1486 SCTP_BUF_LEN(oper) =
1487 sizeof(struct sctp_paramhdr) +
1488 (3 * sizeof(uint32_t));
1490 struct sctp_paramhdr *);
1492 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1494 htons(SCTP_BUF_LEN(oper));
1495 ippp = (uint32_t *) (ph + 1);
1496 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1498 *ippp = chk->rec.data.TSN_seq;
1500 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1502 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1503 sctp_abort_an_association(stcb->sctp_ep,
1504 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1512 /* Do we need to do some delivery? check */
1513 sctp_deliver_reasm_check(stcb, asoc);
1517 * This is an unfortunate routine. It checks to make sure a evil guy is not
1518 * stuffing us full of bad packet fragments. A broken peer could also do this
1519 * but this is doubtful. It is to bad I must worry about evil crackers sigh
1523 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1526 struct sctp_tmit_chunk *at;
1529 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1530 if (compare_with_wrap(TSN_seq,
1531 at->rec.data.TSN_seq, MAX_TSN)) {
1532 /* is it one bigger? */
1533 tsn_est = at->rec.data.TSN_seq + 1;
1534 if (tsn_est == TSN_seq) {
1535 /* yep. It better be a last then */
1536 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1537 SCTP_DATA_LAST_FRAG) {
1539 * Ok this guy belongs next to a guy
1540 * that is NOT last, it should be a
1541 * middle/last, not a complete
1547 * This guy is ok since its a LAST
1548 * and the new chunk is a fully
1549 * self- contained one.
1554 } else if (TSN_seq == at->rec.data.TSN_seq) {
1555 /* Software error since I have a dup? */
1559 * Ok, 'at' is larger than new chunk but does it
1560 * need to be right before it.
1562 tsn_est = TSN_seq + 1;
1563 if (tsn_est == at->rec.data.TSN_seq) {
1564 /* Yep, It better be a first */
1565 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1566 SCTP_DATA_FIRST_FRAG) {
1579 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1580 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1581 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1582 int *break_flag, int last_chunk)
1584 /* Process a data chunk */
1585 /* struct sctp_tmit_chunk *chk; */
1586 struct sctp_tmit_chunk *chk;
1589 /* EY - for nr_sack */
1593 int need_reasm_check = 0;
1594 uint16_t strmno, strmseq;
1596 struct sctp_queued_to_read *control;
1598 uint32_t protocol_id;
1599 uint8_t chunk_flags;
1600 struct sctp_stream_reset_list *liste;
1603 tsn = ntohl(ch->dp.tsn);
1604 chunk_flags = ch->ch.chunk_flags;
1605 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1606 asoc->send_sack = 1;
1608 protocol_id = ch->dp.protocol_id;
1609 ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1610 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1611 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1616 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1617 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1618 asoc->cumulative_tsn == tsn) {
1619 /* It is a duplicate */
1620 SCTP_STAT_INCR(sctps_recvdupdata);
1621 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1622 /* Record a dup for the next outbound sack */
1623 asoc->dup_tsns[asoc->numduptsns] = tsn;
1626 asoc->send_sack = 1;
1629 /* Calculate the number of TSN's between the base and this TSN */
1630 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1631 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1632 /* Can't hold the bit in the mapping at max array, toss it */
1635 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1636 SCTP_TCB_LOCK_ASSERT(stcb);
1637 if (sctp_expand_mapping_array(asoc, gap)) {
1638 /* Can't expand, drop it */
1642 /* EY - for nr_sack */
1645 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1648 /* See if we have received this one already */
1649 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1650 SCTP_STAT_INCR(sctps_recvdupdata);
1651 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1652 /* Record a dup for the next outbound sack */
1653 asoc->dup_tsns[asoc->numduptsns] = tsn;
1656 asoc->send_sack = 1;
1660 * Check to see about the GONE flag, duplicates would cause a sack
1661 * to be sent up above
1663 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1664 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1665 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1668 * wait a minute, this guy is gone, there is no longer a
1669 * receiver. Send peer an ABORT!
1671 struct mbuf *op_err;
1673 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1674 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1679 * Now before going further we see if there is room. If NOT then we
1680 * MAY let one through only IF this TSN is the one we are waiting
1681 * for on a partial delivery API.
1684 /* now do the tests */
1685 if (((asoc->cnt_on_all_streams +
1686 asoc->cnt_on_reasm_queue +
1687 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1688 (((int)asoc->my_rwnd) <= 0)) {
1690 * When we have NO room in the rwnd we check to make sure
1691 * the reader is doing its job...
1693 if (stcb->sctp_socket->so_rcv.sb_cc) {
1694 /* some to read, wake-up */
1695 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1698 so = SCTP_INP_SO(stcb->sctp_ep);
1699 atomic_add_int(&stcb->asoc.refcnt, 1);
1700 SCTP_TCB_UNLOCK(stcb);
1701 SCTP_SOCKET_LOCK(so, 1);
1702 SCTP_TCB_LOCK(stcb);
1703 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1704 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1705 /* assoc was freed while we were unlocked */
1706 SCTP_SOCKET_UNLOCK(so, 1);
1710 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1711 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1712 SCTP_SOCKET_UNLOCK(so, 1);
1715 /* now is it in the mapping array of what we have accepted? */
1716 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1717 /* Nope not in the valid range dump it */
1718 sctp_set_rwnd(stcb, asoc);
1719 if ((asoc->cnt_on_all_streams +
1720 asoc->cnt_on_reasm_queue +
1721 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1722 SCTP_STAT_INCR(sctps_datadropchklmt);
1724 SCTP_STAT_INCR(sctps_datadroprwnd);
1731 strmno = ntohs(ch->dp.stream_id);
1732 if (strmno >= asoc->streamincnt) {
1733 struct sctp_paramhdr *phdr;
1736 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1737 0, M_DONTWAIT, 1, MT_DATA);
1739 /* add some space up front so prepend will work well */
1740 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1741 phdr = mtod(mb, struct sctp_paramhdr *);
1743 * Error causes are just param's and this one has
1744 * two back to back phdr, one with the error type
1745 * and size, the other with the streamid and a rsvd
1747 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1748 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1749 phdr->param_length =
1750 htons(sizeof(struct sctp_paramhdr) * 2);
1752 /* We insert the stream in the type field */
1753 phdr->param_type = ch->dp.stream_id;
1754 /* And set the length to 0 for the rsvd field */
1755 phdr->param_length = 0;
1756 sctp_queue_op_err(stcb, mb);
1758 SCTP_STAT_INCR(sctps_badsid);
1759 SCTP_TCB_LOCK_ASSERT(stcb);
1760 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1761 /* EY set this tsn present in nr_sack's nr_mapping_array */
1762 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1763 SCTP_TCB_LOCK_ASSERT(stcb);
1764 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1765 SCTP_REVERSE_OUT_TSN_PRES(gap, tsn, asoc);
1767 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1768 /* we have a new high score */
1769 asoc->highest_tsn_inside_map = tsn;
1770 /* EY nr_sack version of the above */
1771 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
1772 asoc->highest_tsn_inside_nr_map = tsn;
1773 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1774 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1777 if (tsn == (asoc->cumulative_tsn + 1)) {
1778 /* Update cum-ack */
1779 asoc->cumulative_tsn = tsn;
1784 * Before we continue lets validate that we are not being fooled by
1785 * an evil attacker. We can only have 4k chunks based on our TSN
1786 * spread allowed by the mapping array 512 * 8 bits, so there is no
1787 * way our stream sequence numbers could have wrapped. We of course
1788 * only validate the FIRST fragment so the bit must be set.
1790 strmseq = ntohs(ch->dp.stream_sequence);
1791 #ifdef SCTP_ASOCLOG_OF_TSNS
1792 SCTP_TCB_LOCK_ASSERT(stcb);
1793 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1794 asoc->tsn_in_at = 0;
1795 asoc->tsn_in_wrapped = 1;
1797 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1798 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1799 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1800 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1801 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1802 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1803 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1804 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1807 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1808 (TAILQ_EMPTY(&asoc->resetHead)) &&
1809 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1810 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1811 strmseq, MAX_SEQ) ||
1812 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1813 /* The incoming sseq is behind where we last delivered? */
1814 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1815 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1816 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1817 0, M_DONTWAIT, 1, MT_DATA);
1819 struct sctp_paramhdr *ph;
1822 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1823 (3 * sizeof(uint32_t));
1824 ph = mtod(oper, struct sctp_paramhdr *);
1825 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1826 ph->param_length = htons(SCTP_BUF_LEN(oper));
1827 ippp = (uint32_t *) (ph + 1);
1828 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1832 *ippp = ((strmno << 16) | strmseq);
1835 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1836 sctp_abort_an_association(stcb->sctp_ep, stcb,
1837 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1841 /************************************
1842 * From here down we may find ch-> invalid
1843 * so its a good idea NOT to use it.
1844 *************************************/
1846 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1847 if (last_chunk == 0) {
1848 dmbuf = SCTP_M_COPYM(*m,
1849 (offset + sizeof(struct sctp_data_chunk)),
1850 the_len, M_DONTWAIT);
1851 #ifdef SCTP_MBUF_LOGGING
1852 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1857 if (SCTP_BUF_IS_EXTENDED(mat)) {
1858 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1860 mat = SCTP_BUF_NEXT(mat);
1865 /* We can steal the last chunk */
1869 /* lop off the top part */
1870 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1871 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1872 l_len = SCTP_BUF_LEN(dmbuf);
1875 * need to count up the size hopefully does not hit
1883 l_len += SCTP_BUF_LEN(lat);
1884 lat = SCTP_BUF_NEXT(lat);
1887 if (l_len > the_len) {
1888 /* Trim the end round bytes off too */
1889 m_adj(dmbuf, -(l_len - the_len));
1892 if (dmbuf == NULL) {
1893 SCTP_STAT_INCR(sctps_nomem);
1896 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1897 asoc->fragmented_delivery_inprogress == 0 &&
1898 TAILQ_EMPTY(&asoc->resetHead) &&
1900 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1901 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1902 /* Candidate for express delivery */
1904 * Its not fragmented, No PD-API is up, Nothing in the
1905 * delivery queue, Its un-ordered OR ordered and the next to
1906 * deliver AND nothing else is stuck on the stream queue,
1907 * And there is room for it in the socket buffer. Lets just
1908 * stuff it up the buffer....
1911 /* It would be nice to avoid this copy if we could :< */
1912 sctp_alloc_a_readq(stcb, control);
1913 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1919 if (control == NULL) {
1920 goto failed_express_del;
1922 sctp_add_to_readq(stcb->sctp_ep, stcb,
1923 control, &stcb->sctp_socket->so_rcv,
1924 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1927 * EY here I should check if this delivered tsn is
1928 * out_of_order, if yes then update the nr_map
1930 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1932 * EY check if the mapping_array and nr_mapping
1933 * array are consistent
1935 if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
1938 * sctp_process_a_data_chunk(5): Something
1939 * is wrong the map base tsn" "\nEY-and
1940 * nr_map base tsn should be equal.");
1942 /* EY debugging block */
1945 * printf("\nEY-Calculating an
1946 * nr_gap!!\nmapping_array_size = %d
1947 * nr_mapping_array_size = %d"
1948 * "\nEY-mapping_array_base = %d
1949 * nr_mapping_array_base =
1950 * %d\nEY-highest_tsn_inside_map = %d"
1951 * "highest_tsn_inside_nr_map = %d\nEY-TSN =
1952 * %d nr_gap = %d",asoc->mapping_array_size,
1953 * asoc->nr_mapping_array_size,
1954 * asoc->mapping_array_base_tsn,
1955 * asoc->nr_mapping_array_base_tsn,
1956 * asoc->highest_tsn_inside_map,
1957 * asoc->highest_tsn_inside_nr_map,tsn,nr_gap
1961 /* EY - not %100 sure about the lock thing */
1962 SCTP_TCB_LOCK_ASSERT(stcb);
1963 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
1964 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
1965 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
1966 asoc->highest_tsn_inside_nr_map = tsn;
1968 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1969 /* for ordered, bump what we delivered */
1970 asoc->strmin[strmno].last_sequence_delivered++;
1972 SCTP_STAT_INCR(sctps_recvexpress);
1973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1974 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1975 SCTP_STR_LOG_FROM_EXPRS_DEL);
1978 goto finish_express_del;
1981 /* If we reach here this is a new chunk */
1984 /* Express for fragmented delivery? */
1985 if ((asoc->fragmented_delivery_inprogress) &&
1986 (stcb->asoc.control_pdapi) &&
1987 (asoc->str_of_pdapi == strmno) &&
1988 (asoc->ssn_of_pdapi == strmseq)
1990 control = stcb->asoc.control_pdapi;
1991 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1992 /* Can't be another first? */
1993 goto failed_pdapi_express_del;
1995 if (tsn == (control->sinfo_tsn + 1)) {
1996 /* Yep, we can add it on */
2000 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
2003 cumack = asoc->cumulative_tsn;
2004 if ((cumack + 1) == tsn)
2007 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
2009 &stcb->sctp_socket->so_rcv)) {
2010 SCTP_PRINTF("Append fails end:%d\n", end);
2011 goto failed_pdapi_express_del;
2014 * EY It is appended to the read queue in prev if
2015 * block here I should check if this delivered tsn
2016 * is out_of_order, if yes then update the nr_map
2018 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2019 /* EY debugging block */
2022 * printf("\nEY-Calculating an
2023 * nr_gap!!\nEY-mapping_array_size =
2024 * %d nr_mapping_array_size = %d"
2025 * "\nEY-mapping_array_base = %d
2026 * nr_mapping_array_base =
2027 * %d\nEY-highest_tsn_inside_map =
2028 * %d" "highest_tsn_inside_nr_map =
2029 * %d\nEY-TSN = %d nr_gap =
2030 * %d",asoc->mapping_array_size,
2031 * asoc->nr_mapping_array_size,
2032 * asoc->mapping_array_base_tsn,
2033 * asoc->nr_mapping_array_base_tsn,
2034 * asoc->highest_tsn_inside_map,
2035 * asoc->highest_tsn_inside_nr_map,ts
2039 /* EY - not %100 sure about the lock thing */
2040 SCTP_TCB_LOCK_ASSERT(stcb);
2041 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2042 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2043 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2044 asoc->highest_tsn_inside_nr_map = tsn;
2046 SCTP_STAT_INCR(sctps_recvexpressm);
2047 control->sinfo_tsn = tsn;
2048 asoc->tsn_last_delivered = tsn;
2049 asoc->fragment_flags = chunk_flags;
2050 asoc->tsn_of_pdapi_last_delivered = tsn;
2051 asoc->last_flags_delivered = chunk_flags;
2052 asoc->last_strm_seq_delivered = strmseq;
2053 asoc->last_strm_no_delivered = strmno;
2055 /* clean up the flags and such */
2056 asoc->fragmented_delivery_inprogress = 0;
2057 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2058 asoc->strmin[strmno].last_sequence_delivered++;
2060 stcb->asoc.control_pdapi = NULL;
2061 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
2063 * There could be another message
2066 need_reasm_check = 1;
2070 goto finish_express_del;
2073 failed_pdapi_express_del:
2075 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2076 sctp_alloc_a_chunk(stcb, chk);
2078 /* No memory so we drop the chunk */
2079 SCTP_STAT_INCR(sctps_nomem);
2080 if (last_chunk == 0) {
2081 /* we copied it, free the copy */
2082 sctp_m_freem(dmbuf);
2086 chk->rec.data.TSN_seq = tsn;
2087 chk->no_fr_allowed = 0;
2088 chk->rec.data.stream_seq = strmseq;
2089 chk->rec.data.stream_number = strmno;
2090 chk->rec.data.payloadtype = protocol_id;
2091 chk->rec.data.context = stcb->asoc.context;
2092 chk->rec.data.doing_fast_retransmit = 0;
2093 chk->rec.data.rcv_flags = chunk_flags;
2095 chk->send_size = the_len;
2097 atomic_add_int(&net->ref_count, 1);
2100 sctp_alloc_a_readq(stcb, control);
2101 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2107 if (control == NULL) {
2108 /* No memory so we drop the chunk */
2109 SCTP_STAT_INCR(sctps_nomem);
2110 if (last_chunk == 0) {
2111 /* we copied it, free the copy */
2112 sctp_m_freem(dmbuf);
2116 control->length = the_len;
2119 /* Mark it as received */
2120 /* Now queue it where it belongs */
2121 if (control != NULL) {
2122 /* First a sanity check */
2123 if (asoc->fragmented_delivery_inprogress) {
2125 * Ok, we have a fragmented delivery in progress if
2126 * this chunk is next to deliver OR belongs in our
2127 * view to the reassembly, the peer is evil or
2130 uint32_t estimate_tsn;
2132 estimate_tsn = asoc->tsn_last_delivered + 1;
2133 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2134 (estimate_tsn == control->sinfo_tsn)) {
2135 /* Evil/Broke peer */
2136 sctp_m_freem(control->data);
2137 control->data = NULL;
2138 if (control->whoFrom) {
2139 sctp_free_remote_addr(control->whoFrom);
2140 control->whoFrom = NULL;
2142 sctp_free_a_readq(stcb, control);
2143 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2144 0, M_DONTWAIT, 1, MT_DATA);
2146 struct sctp_paramhdr *ph;
2149 SCTP_BUF_LEN(oper) =
2150 sizeof(struct sctp_paramhdr) +
2151 (3 * sizeof(uint32_t));
2152 ph = mtod(oper, struct sctp_paramhdr *);
2154 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2155 ph->param_length = htons(SCTP_BUF_LEN(oper));
2156 ippp = (uint32_t *) (ph + 1);
2157 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
2161 *ippp = ((strmno << 16) | strmseq);
2163 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
2164 sctp_abort_an_association(stcb->sctp_ep, stcb,
2165 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2170 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2171 sctp_m_freem(control->data);
2172 control->data = NULL;
2173 if (control->whoFrom) {
2174 sctp_free_remote_addr(control->whoFrom);
2175 control->whoFrom = NULL;
2177 sctp_free_a_readq(stcb, control);
2179 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2180 0, M_DONTWAIT, 1, MT_DATA);
2182 struct sctp_paramhdr *ph;
2185 SCTP_BUF_LEN(oper) =
2186 sizeof(struct sctp_paramhdr) +
2187 (3 * sizeof(uint32_t));
2189 struct sctp_paramhdr *);
2191 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2193 htons(SCTP_BUF_LEN(oper));
2194 ippp = (uint32_t *) (ph + 1);
2195 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2199 *ippp = ((strmno << 16) | strmseq);
2201 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2202 sctp_abort_an_association(stcb->sctp_ep,
2203 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2210 /* No PDAPI running */
2211 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2213 * Reassembly queue is NOT empty validate
2214 * that this tsn does not need to be in
2215 * reasembly queue. If it does then our peer
2216 * is broken or evil.
2218 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2219 sctp_m_freem(control->data);
2220 control->data = NULL;
2221 if (control->whoFrom) {
2222 sctp_free_remote_addr(control->whoFrom);
2223 control->whoFrom = NULL;
2225 sctp_free_a_readq(stcb, control);
2226 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2227 0, M_DONTWAIT, 1, MT_DATA);
2229 struct sctp_paramhdr *ph;
2232 SCTP_BUF_LEN(oper) =
2233 sizeof(struct sctp_paramhdr) +
2234 (3 * sizeof(uint32_t));
2236 struct sctp_paramhdr *);
2238 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2240 htons(SCTP_BUF_LEN(oper));
2241 ippp = (uint32_t *) (ph + 1);
2242 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2246 *ippp = ((strmno << 16) | strmseq);
2248 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2249 sctp_abort_an_association(stcb->sctp_ep,
2250 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2257 /* ok, if we reach here we have passed the sanity checks */
2258 if (chunk_flags & SCTP_DATA_UNORDERED) {
2259 /* queue directly into socket buffer */
2260 sctp_add_to_readq(stcb->sctp_ep, stcb,
2262 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2265 * EY It is added to the read queue in prev if block
2266 * here I should check if this delivered tsn is
2267 * out_of_order, if yes then update the nr_map
2269 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2271 * EY check if the mapping_array and
2272 * nr_mapping array are consistent
2274 if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
2277 * sctp_process_a_data_chunk(6):
2278 * Something is wrong the map base
2279 * tsn" "\nEY-and nr_map base tsn
2280 * should be equal.");
2283 * EY - not %100 sure about the lock
2284 * thing, i think we don't need the
2287 /* SCTP_TCB_LOCK_ASSERT(stcb); */
2290 * printf("\nEY-Calculating an
2291 * nr_gap!!\nEY-mapping_array_size =
2292 * %d nr_mapping_array_size = %d"
2293 * "\nEY-mapping_array_base = %d
2294 * nr_mapping_array_base =
2295 * %d\nEY-highest_tsn_inside_map =
2296 * %d" "highest_tsn_inside_nr_map =
2297 * %d\nEY-TSN = %d nr_gap =
2298 * %d",asoc->mapping_array_size,
2299 * asoc->nr_mapping_array_size,
2300 * asoc->mapping_array_base_tsn,
2301 * asoc->nr_mapping_array_base_tsn,
2302 * asoc->highest_tsn_inside_map,
2303 * asoc->highest_tsn_inside_nr_map,ts
2307 SCTP_TCB_LOCK_ASSERT(stcb);
2308 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2309 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2310 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2311 asoc->highest_tsn_inside_nr_map = tsn;
2315 * Special check for when streams are resetting. We
2316 * could be more smart about this and check the
2317 * actual stream to see if it is not being reset..
2318 * that way we would not create a HOLB when amongst
2319 * streams being reset and those not being reset.
2321 * We take complete messages that have a stream reset
2322 * intervening (aka the TSN is after where our
2323 * cum-ack needs to be) off and put them on a
2324 * pending_reply_queue. The reassembly ones we do
2325 * not have to worry about since they are all sorted
2326 * and proceessed by TSN order. It is only the
2327 * singletons I must worry about.
2329 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2330 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2333 * yep its past where we need to reset... go
2334 * ahead and queue it.
2336 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2338 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2340 struct sctp_queued_to_read *ctlOn;
2341 unsigned char inserted = 0;
2343 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2345 if (compare_with_wrap(control->sinfo_tsn,
2346 ctlOn->sinfo_tsn, MAX_TSN)) {
2347 ctlOn = TAILQ_NEXT(ctlOn, next);
2350 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2355 if (inserted == 0) {
2357 * must be put at end, use
2358 * prevP (all setup from
2359 * loop) to setup nextP.
2361 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2365 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2372 /* Into the re-assembly queue */
2373 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2376 * the assoc is now gone and chk was put onto the
2377 * reasm queue, which has all been freed.
2384 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2385 /* we have a new high score */
2386 asoc->highest_tsn_inside_map = tsn;
2387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2391 if (tsn == (asoc->cumulative_tsn + 1)) {
2392 /* Update cum-ack */
2393 asoc->cumulative_tsn = tsn;
2399 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2401 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2403 SCTP_STAT_INCR(sctps_recvdata);
2404 /* Set it present please */
2405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2406 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2408 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2409 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2410 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2412 SCTP_TCB_LOCK_ASSERT(stcb);
2413 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2415 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
2416 asoc->peer_supports_nr_sack &&
2417 (SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) {
2418 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2419 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2420 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2421 asoc->highest_tsn_inside_nr_map = tsn;
2424 /* check the special flag for stream resets */
2425 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2426 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2427 (asoc->cumulative_tsn == liste->tsn))
2430 * we have finished working through the backlogged TSN's now
2431 * time to reset streams. 1: call reset function. 2: free
2432 * pending_reply space 3: distribute any chunks in
2433 * pending_reply_queue.
2435 struct sctp_queued_to_read *ctl;
2437 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2438 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2439 SCTP_FREE(liste, SCTP_M_STRESET);
2440 /* sa_ignore FREED_MEMORY */
2441 liste = TAILQ_FIRST(&asoc->resetHead);
2442 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2443 if (ctl && (liste == NULL)) {
2444 /* All can be removed */
2446 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2447 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2451 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2454 /* more than one in queue */
2455 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2457 * if ctl->sinfo_tsn is <= liste->tsn we can
2458 * process it which is the NOT of
2459 * ctl->sinfo_tsn > liste->tsn
2461 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2462 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2466 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2470 * Now service re-assembly to pick up anything that has been
2471 * held on reassembly queue?
2473 sctp_deliver_reasm_check(stcb, asoc);
2474 need_reasm_check = 0;
2476 if (need_reasm_check) {
2477 /* Another one waits ? */
2478 sctp_deliver_reasm_check(stcb, asoc);
/*
 * Lookup table used when scanning the TSN mapping array for the
 * cumulative-ack point.  For a byte value x taken from the mapping
 * array, sctp_map_lookup_tab[x] is:
 *
 *     (number of consecutive low-order 1 bits in x) - 1
 *
 * e.g. 0x00 -> -1, 0x01 -> 0, 0x03 -> 1, 0x7f -> 6, 0xff -> 7.
 * The caller (sctp_sack_check) adds this offset to 8 * byte-index to
 * find the last in-sequence TSN; the embedded -1 accounts for bytes
 * whose low-order bit (the next expected TSN) is still clear.
 *
 * NOTE(review): the listing being reviewed had lost the terminating
 * "};" of this initializer (extraction artifact); it is restored here.
 */
int8_t sctp_map_lookup_tab[256] = {
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 6,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 7,
};
/*
 * sctp_sack_check() - after new DATA has been processed, advance the
 * cumulative-TSN point by scanning the mapping array, slide/clear the
 * mapping array (and the EY nr_sack shadow array) when possible, and
 * decide whether a SACK (or NR-SACK) must go out now or whether the
 * delayed-ack timer covers it.
 *
 * NOTE(review): this listing is a truncated extract - interior source
 * lines are missing and the leading numerals on each line are
 * extraction artifacts, so several blocks below appear without their
 * opening/closing braces.  Comments describe only what the visible
 * lines show; confirm against the full file before relying on them.
 */
2520 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2523 * Now we also need to check the mapping array in a couple of ways.
2524 * 1) Did we move the cum-ack point?
2526 struct sctp_association *asoc;
2528 int last_all_ones = 0;
2529 int slide_from, slide_end, lgap, distance;
2531 /* EY nr_mapping array variables */
2533 /* int nr_last_all_ones = 0; */
2534 /* int nr_slide_from, nr_slide_end, nr_lgap, nr_distance; */
2536 uint32_t old_cumack, old_base, old_highest;
2537 unsigned char aux_array[64];
2540 * EY! Don't think this is required but I am immitating the code for
2541 * map just to make sure
2543 unsigned char nr_aux_array[64];
/* Snapshot the pre-scan map state; used only for SCTP_MAP_* logging below. */
2548 old_cumack = asoc->cumulative_tsn;
2549 old_base = asoc->mapping_array_base_tsn;
2550 old_highest = asoc->highest_tsn_inside_map;
2551 if (asoc->mapping_array_size < 64)
2552 memcpy(aux_array, asoc->mapping_array,
2553 asoc->mapping_array_size)
2555 memcpy(aux_array, asoc->mapping_array, 64);
2556 /* EY do the same for nr_mapping_array */
2557 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2559 if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
2561 * printf("\nEY-IN sack_check method: \nEY-" "The
2562 * size of map and nr_map are inconsitent")
2565 if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) {
2567 * printf("\nEY-IN sack_check method VERY CRUCIAL
2568 * error: \nEY-" "The base tsns of map and nr_map
2572 /* EY! just immitating the above code */
2573 if (asoc->nr_mapping_array_size < 64)
2574 memcpy(nr_aux_array, asoc->nr_mapping_array,
2575 asoc->nr_mapping_array_size);
/* NOTE(review): copies into aux_array, not nr_aux_array - looks like a
 * copy/paste slip in the EY patch; verify against upstream before fixing. */
2577 memcpy(aux_array, asoc->nr_mapping_array, 64);
2580 * We could probably improve this a small bit by calculating the
2581 * offset of the current cum-ack as the starting point.
/* Scan map bytes: 0xff means 8 consecutive received TSNs; on the first
 * non-0xff byte, sctp_map_lookup_tab gives the trailing-ones offset. */
2584 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2586 if (asoc->mapping_array[slide_from] == 0xff) {
2590 /* there is a 0 bit */
2591 at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
2596 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2597 /* at is one off, since in the table a embedded -1 is present */
/* Sanity: cum-ack must never pass the highest TSN seen in the map. */
2600 if (compare_with_wrap(asoc->cumulative_tsn,
2601 asoc->highest_tsn_inside_map,
2604 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2605 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2607 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2608 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2609 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2610 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2612 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2613 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* Case 1: everything up to the highest TSN is acked - clear the map
 * wholesale and move the base just past the new cum-ack. */
2616 if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2617 /* The complete array was completed by a single FR */
2618 /* higest becomes the cum-ack */
2621 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2622 /* clear the array */
2623 clr = (at >> 3) + 1;
2624 if (clr > asoc->mapping_array_size) {
2625 clr = asoc->mapping_array_size;
2627 memset(asoc->mapping_array, 0, clr);
2628 /* base becomes one ahead of the cum-ack */
2629 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2631 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2633 if (clr > asoc->nr_mapping_array_size)
2634 clr = asoc->nr_mapping_array_size;
2636 memset(asoc->nr_mapping_array, 0, clr);
2637 /* base becomes one ahead of the cum-ack */
2638 asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2639 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2641 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2642 sctp_log_map(old_base, old_cumack, old_highest,
2643 SCTP_MAP_PREPARE_SLIDE);
2644 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2645 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
/* Case 2: at least 8 in-order TSNs acked - slide the map down by
 * slide_from whole bytes instead of clearing it. */
2647 } else if (at >= 8) {
2648 /* we can slide the mapping array down */
2649 /* slide_from holds where we hit the first NON 0xff byte */
2652 * now calculate the ceiling of the move using our highest
2655 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2656 lgap = asoc->highest_tsn_inside_map -
2657 asoc->mapping_array_base_tsn;
2659 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2660 asoc->highest_tsn_inside_map + 1;
2662 slide_end = lgap >> 3;
2663 if (slide_end < slide_from) {
2665 panic("impossible slide");
2667 printf("impossible slide?\n");
2671 if (slide_end > asoc->mapping_array_size) {
2673 panic("would overrun buffer");
2675 printf("Gak, would have overrun map end:%d slide_end:%d\n",
2676 asoc->mapping_array_size, slide_end);
2677 slide_end = asoc->mapping_array_size;
2680 distance = (slide_end - slide_from) + 1;
2681 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2682 sctp_log_map(old_base, old_cumack, old_highest,
2683 SCTP_MAP_PREPARE_SLIDE);
2684 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2685 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2687 if (distance + slide_from > asoc->mapping_array_size ||
2690 * Here we do NOT slide forward the array so that
2691 * hopefully when more data comes in to fill it up
2692 * we will be able to slide it forward. Really I
2693 * don't think this should happen :-0
2696 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2697 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2698 (uint32_t) asoc->mapping_array_size,
2699 SCTP_MAP_SLIDE_NONE);
/* Shift live bytes to the front, zero the vacated tail, advance base. */
2704 for (ii = 0; ii < distance; ii++) {
2705 asoc->mapping_array[ii] =
2706 asoc->mapping_array[slide_from + ii];
2708 for (ii = distance; ii <= slide_end; ii++) {
2709 asoc->mapping_array[ii] = 0;
2711 asoc->mapping_array_base_tsn += (slide_from << 3);
2712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2713 sctp_log_map(asoc->mapping_array_base_tsn,
2714 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2715 SCTP_MAP_SLIDE_RESULT);
2718 * EY if doing nr_sacks then slide the
2719 * nr_mapping_array accordingly please
2721 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2722 for (ii = 0; ii < distance; ii++) {
2723 asoc->nr_mapping_array[ii] =
2724 asoc->nr_mapping_array[slide_from + ii];
2726 for (ii = distance; ii <= slide_end; ii++) {
2727 asoc->nr_mapping_array[ii] = 0;
2729 asoc->nr_mapping_array_base_tsn += (slide_from << 3);
2734 * Now we need to see if we need to queue a sack or just start the
2735 * timer (if allowed).
/* SHUTDOWN-SENT special case: stop the delayed-ack timer and emit a
 * SHUTDOWN plus an immediate (NR-)SACK. */
2738 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2740 * Ok special case, in SHUTDOWN-SENT case. here we
2741 * maker sure SACK timer is off and instead send a
2742 * SHUTDOWN and a SACK
2744 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2745 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2746 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2748 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2750 * EY if nr_sacks used then send an nr-sack , a sack
2753 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2754 sctp_send_nr_sack(stcb);
2756 sctp_send_sack(stcb);
2760 /* is there a gap now ? */
2761 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2762 stcb->asoc.cumulative_tsn, MAX_TSN);
2765 * CMT DAC algorithm: increase number of packets
2766 * received since last ack
2768 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Immediate-SACK conditions; otherwise fall through to (re)arm the
 * delayed-ack timer.  CMT-DAC may still delay the ack below. */
2770 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2772 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2774 (stcb->asoc.numduptsns) || /* we have dup's */
2775 (is_a_gap) || /* is still a gap */
2776 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2777 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2780 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2781 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2782 (stcb->asoc.send_sack == 0) &&
2783 (stcb->asoc.numduptsns == 0) &&
2784 (stcb->asoc.delayed_ack) &&
2785 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2788 * CMT DAC algorithm: With CMT,
2789 * delay acks even in the face of
2791 * reordering. Therefore, if acks that
2792 * do not have to be sent because of
2793 * the above reasons, will be
2794 * delayed. That is, acks that would
2795 * have been sent due to gap reports
2796 * will be delayed with DAC. Start
2797 * the delayed ack timer.
2799 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2800 stcb->sctp_ep, stcb, NULL);
2803 * Ok we must build a SACK since the
2804 * timer is pending, we got our
2805 * first packet OR there are gaps or
2808 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2810 * EY if nr_sacks used then send an
2811 * nr-sack , a sack otherwise
2813 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2814 sctp_send_nr_sack(stcb);
2816 sctp_send_sack(stcb);
2819 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2820 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2821 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() - drive delivery from the reassembly queue.
 * If a partial-delivery (PD-API) is already in progress, continue it;
 * otherwise, if the first fragment of the next deliverable message is
 * at the head of the reassembly queue, start a new partial delivery
 * when either the whole message is present or its size reaches the
 * partial-delivery point (min of socket rcv buffer limit and the
 * endpoint's partial_delivery_point, when a socket is attached).
 *
 * NOTE(review): truncated listing - interior lines are missing and the
 * leading numerals are extraction artifacts.
 */
2829 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2831 struct sctp_tmit_chunk *chk;
2832 uint32_t tsize, pd_point;
2835 if (asoc->fragmented_delivery_inprogress) {
2836 sctp_service_reassembly(stcb, asoc);
2838 /* Can we proceed further, i.e. the PD-API is complete */
2839 if (asoc->fragmented_delivery_inprogress) {
2844 * Now is there some other chunk I can deliver from the reassembly
/* Empty reassembly queue: make the accounting counters agree. */
2848 chk = TAILQ_FIRST(&asoc->reasmqueue);
2850 asoc->size_on_reasm_queue = 0;
2851 asoc->cnt_on_reasm_queue = 0;
/* Head chunk must be a FIRST_FRAG that is either next in stream
 * sequence or unordered before we may begin delivering it. */
2854 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2855 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2856 ((nxt_todel == chk->rec.data.stream_seq) ||
2857 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2859 * Yep the first one is here. We setup to start reception,
2860 * by backing down the TSN just in case we can't deliver.
2864 * Before we start though either all of the message should
2865 * be here or the socket buffer max or nothing on the
2866 * delivery queue and something can be delivered.
2868 if (stcb->sctp_socket) {
2869 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2870 stcb->sctp_ep->partial_delivery_point);
2872 pd_point = stcb->sctp_ep->partial_delivery_point;
2874 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Record the PD-API context (stream, ssn, ppid, flags) so later
 * fragments can be matched to this in-progress message. */
2875 asoc->fragmented_delivery_inprogress = 1;
2876 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2877 asoc->str_of_pdapi = chk->rec.data.stream_number;
2878 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2879 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2880 asoc->fragment_flags = chk->rec.data.rcv_flags;
2881 sctp_service_reassembly(stcb, asoc);
2882 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() - walk every chunk in an inbound packet's data
 * region.  DATA chunks are handed to sctp_process_a_data_chunk();
 * known control chunk types found here are ignored (or abort the
 * association under the sctp_strict_data_order sysctl); unknown types
 * follow the RFC bit rules (0x40: queue an unrecognized-chunk error
 * report, 0x80 clear: stop processing the packet).  Afterwards it
 * services the reassembly queue and either sends a(n NR-)SACK or
 * starts the delayed-ack timer.
 *
 * NOTE(review): truncated listing - interior lines are missing and the
 * leading numerals are extraction artifacts; several conditionals
 * below appear without their braces/else arms.
 */
2890 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2891 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2892 struct sctp_nets *net, uint32_t * high_tsn)
2894 struct sctp_data_chunk *ch, chunk_buf;
2895 struct sctp_association *asoc;
2896 int num_chunks = 0; /* number of control chunks processed */
2898 int chk_length, break_flag, last_chunk;
2899 int abort_flag = 0, was_a_gap = 0;
2903 sctp_set_rwnd(stcb, &stcb->asoc);
2906 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap existed before this packet; feeds the
 * was_a_gap argument of sctp_sack_check() at the bottom. */
2908 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2909 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2910 /* there was a gap before this data was processed */
2914 * setup where we got the last DATA packet from for any SACK that
2915 * may need to go out. Don't bump the net. This is done ONLY when a
2916 * chunk is assigned.
2918 asoc->last_data_chunk_from = net;
2921 * Now before we proceed we must figure out if this is a wasted
2922 * cluster... i.e. it is a small packet sent in and yet the driver
2923 * underneath allocated a full cluster for it. If so we must copy it
2924 * to a smaller mbuf and free up the cluster mbuf. This will help
2925 * with cluster starvation. Note for __Panda__ we don't do this
2926 * since it has clusters all the way down to 64 bytes.
2928 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2929 /* we only handle mbufs that are singletons.. not chains */
2930 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2932 /* ok lets see if we can copy the data up */
2935 /* get the pointers and copy */
2936 to = mtod(m, caddr_t *);
2937 from = mtod((*mm), caddr_t *);
2938 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2939 /* copy the length and free up the old */
2940 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2942 /* sucess, back copy */
2945 /* We are in trouble in the mbuf world .. yikes */
2949 /* get pointer to the first chunk header */
2950 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2951 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2956 * process all DATA chunks...
2958 *high_tsn = asoc->cumulative_tsn;
2960 asoc->data_pkts_seen++;
2961 while (stop_proc == 0) {
2962 /* validate chunk length */
2963 chk_length = ntohs(ch->ch.chunk_length);
2964 if (length - *offset < chk_length) {
2965 /* all done, mutulated chunk */
2969 if (ch->ch.chunk_type == SCTP_DATA) {
/* A DATA chunk must carry at least one payload byte beyond its
 * header; otherwise build a PROTOCOL_VIOLATION cause and abort. */
2970 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2972 * Need to send an abort since we had a
2973 * invalid data chunk.
2975 struct mbuf *op_err;
2977 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2978 0, M_DONTWAIT, 1, MT_DATA);
2981 struct sctp_paramhdr *ph;
2984 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2985 (2 * sizeof(uint32_t));
2986 ph = mtod(op_err, struct sctp_paramhdr *);
2988 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2989 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2990 ippp = (uint32_t *) (ph + 1);
2991 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2993 *ippp = asoc->cumulative_tsn;
2996 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2997 sctp_abort_association(inp, stcb, m, iphlen, sh,
2998 op_err, 0, net->port);
3001 #ifdef SCTP_AUDITING_ENABLED
3002 sctp_audit_log(0xB1, 0);
3004 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
3009 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
3010 chk_length, net, high_tsn, &abort_flag, &break_flag,
3019 * Set because of out of rwnd space and no
3020 * drop rep space left.
3026 /* not a data chunk in the data region */
3027 switch (ch->ch.chunk_type) {
3028 case SCTP_INITIATION:
3029 case SCTP_INITIATION_ACK:
3030 case SCTP_SELECTIVE_ACK:
3031 case SCTP_NR_SELECTIVE_ACK: /* EY */
3032 case SCTP_HEARTBEAT_REQUEST:
3033 case SCTP_HEARTBEAT_ACK:
3034 case SCTP_ABORT_ASSOCIATION:
3036 case SCTP_SHUTDOWN_ACK:
3037 case SCTP_OPERATION_ERROR:
3038 case SCTP_COOKIE_ECHO:
3039 case SCTP_COOKIE_ACK:
3042 case SCTP_SHUTDOWN_COMPLETE:
3043 case SCTP_AUTHENTICATION:
3044 case SCTP_ASCONF_ACK:
3045 case SCTP_PACKET_DROPPED:
3046 case SCTP_STREAM_RESET:
3047 case SCTP_FORWARD_CUM_TSN:
3050 * Now, what do we do with KNOWN chunks that
3051 * are NOT in the right place?
3053 * For now, I do nothing but ignore them. We
3054 * may later want to add sysctl stuff to
3055 * switch out and do either an ABORT() or
3056 * possibly process them.
3058 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
3059 struct mbuf *op_err;
3061 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
3062 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
3067 /* unknown chunk type, use bit rules */
3068 if (ch->ch.chunk_type & 0x40) {
3069 /* Add a error report to the queue */
3071 struct sctp_paramhdr *phd;
3073 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
3075 phd = mtod(merr, struct sctp_paramhdr *);
3077 * We cheat and use param
3078 * type since we did not
3079 * bother to define a error
3080 * cause struct. They are
3081 * the same basic format
3082 * with different names.
3085 htons(SCTP_CAUSE_UNRECOG_CHUNK);
3087 htons(chk_length + sizeof(*phd));
3088 SCTP_BUF_LEN(merr) = sizeof(*phd);
3089 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
3090 SCTP_SIZE32(chk_length),
3092 if (SCTP_BUF_NEXT(merr)) {
3093 sctp_queue_op_err(stcb, merr);
3099 if ((ch->ch.chunk_type & 0x80) == 0) {
3100 /* discard the rest of this packet */
3102 } /* else skip this bad chunk and
3105 }; /* switch of chunk type */
/* Advance to the next chunk (lengths are 4-byte padded on the wire). */
3107 *offset += SCTP_SIZE32(chk_length);
3108 if ((*offset >= length) || stop_proc) {
3109 /* no more data left in the mbuf chain */
3113 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
3114 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3124 * we need to report rwnd overrun drops.
3126 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
3130 * Did we get data, if so update the time for auto-close and
3131 * give peer credit for being alive.
3133 SCTP_STAT_INCR(sctps_recvpktwithdata);
3134 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3135 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3136 stcb->asoc.overall_error_count,
3138 SCTP_FROM_SCTP_INDATA,
3141 stcb->asoc.overall_error_count = 0;
3142 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3144 /* now service all of the reassm queue if needed */
3145 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
3146 sctp_service_queues(stcb, asoc);
3148 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
3149 /* Assure that we ack right away */
3150 stcb->asoc.send_sack = 1;
3152 /* Start a sack timer or QUEUE a SACK for sending */
/* Fast path: no gap in the map (cum-ack == highest and first byte not
 * fully set) - send a SACK immediately if any trigger applies. */
3153 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
3154 (stcb->asoc.mapping_array[0] != 0xff)) {
3155 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
3156 (stcb->asoc.delayed_ack == 0) ||
3157 (stcb->asoc.numduptsns) ||
3158 (stcb->asoc.send_sack == 1)) {
3159 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3160 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
3163 * EY if nr_sacks used then send an nr-sack , a sack
3166 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
3167 sctp_send_nr_sack(stcb);
3169 sctp_send_sack(stcb);
3171 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3172 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
3173 stcb->sctp_ep, stcb, NULL);
3177 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
/*
 * sctp_handle_segments() - process the Gap Ack Blocks of a received
 * SACK.  For every TSN covered by a block it walks the sent_queue,
 * marks newly acked chunks (SCTP_DATAGRAM_MARKED), removes them from
 * flight (flight_size / total_flight, net_ack credit, RTO sample),
 * maintains the CMT CUCv2 pseudo-cumack / rtx_pseudo_cumack trackers
 * per destination, folds ECN nonce sums, and tracks
 * *biggest_tsn_acked / *biggest_newly_acked_tsn /
 * *this_sack_lowest_newack for the caller's fast-retransmit logic.
 *
 * NOTE(review): truncated listing - interior lines are missing and the
 * leading numerals are extraction artifacts; several comment blocks
 * below are themselves cut mid-sentence.
 */
3186 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3187 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3188 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3189 int num_seg, int *ecn_seg_sums)
3191 /************************************************/
3192 /* process fragments and update sendqueue */
3193 /************************************************/
3194 struct sctp_sack *sack;
3195 struct sctp_gap_ack_block *frag, block;
3196 struct sctp_tmit_chunk *tp1;
3198 unsigned int theTSN;
3201 uint16_t frag_strt, frag_end, primary_flag_set;
3202 u_long last_frag_high;
3205 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
3207 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3208 primary_flag_set = 1;
3210 primary_flag_set = 0;
/* Pull the first gap-ack block out of the mbuf chain (copied into the
 * local 'block' when the data is not contiguous). */
3214 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3215 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3216 *offset += sizeof(block);
3222 for (i = 0; i < num_seg; i++) {
/* Block start/end are 16-bit offsets relative to the SACK's cum-ack
 * (last_tsn); a start past the end is malformed and skipped. */
3223 frag_strt = ntohs(frag->start);
3224 frag_end = ntohs(frag->end);
3225 /* some sanity checks on the fragment offsets */
3226 if (frag_strt > frag_end) {
3227 /* this one is malformed, skip */
3231 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3233 *biggest_tsn_acked = frag_end + last_tsn;
3235 /* mark acked dgs and find out the highestTSN being acked */
3237 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3239 /* save the locations of the last frags */
3240 last_frag_high = frag_end + last_tsn;
3243 * now lets see if we need to reset the queue due to
3244 * a out-of-order SACK fragment
3246 if (compare_with_wrap(frag_strt + last_tsn,
3247 last_frag_high, MAX_TSN)) {
3249 * if the new frag starts after the last TSN
3250 * frag covered, we are ok and this one is
3251 * beyond the last one
3256 * ok, they have reset us, so we need to
3257 * reset the queue this will cause extra
3258 * hunting but hey, they chose the
3259 * performance hit when they failed to order
3262 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3264 last_frag_high = frag_end + last_tsn;
/* Inner loop: one pass per TSN in the block, resuming the sent_queue
 * walk from where the previous TSN left off. */
3266 for (j = frag_strt; j <= frag_end; j++) {
3267 theTSN = j + last_tsn;
3269 if (tp1->rec.data.doing_fast_retransmit)
3273 * CMT: CUCv2 algorithm. For each TSN being
3274 * processed from the sent queue, track the
3275 * next expected pseudo-cumack, or
3276 * rtx_pseudo_cumack, if required. Separate
3277 * cumack trackers for first transmissions,
3278 * and retransmissions.
3280 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3281 (tp1->snd_count == 1)) {
3282 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
3283 tp1->whoTo->find_pseudo_cumack = 0;
3285 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3286 (tp1->snd_count > 1)) {
3287 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
3288 tp1->whoTo->find_rtx_pseudo_cumack = 0;
3290 if (tp1->rec.data.TSN_seq == theTSN) {
3291 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3293 * must be held until
3297 * ECN Nonce: Add the nonce
3298 * value to the sender's
/* sent < RESEND means this chunk was in flight and is newly acked by
 * this SACK; higher states were already acked/resent earlier. */
3301 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3303 * If it is less than RESEND, it is
3304 * now no-longer in flight.
3305 * Higher values may already be set
3306 * via previous Gap Ack Blocks...
3307 * i.e. ACKED or RESEND.
3309 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3310 *biggest_newly_acked_tsn, MAX_TSN)) {
3311 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
3320 * this_sack_highest_
3324 if (tp1->rec.data.chunk_was_revoked == 0)
3325 tp1->whoTo->saw_newack = 1;
3327 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3328 tp1->whoTo->this_sack_highest_newack,
3330 tp1->whoTo->this_sack_highest_newack =
3331 tp1->rec.data.TSN_seq;
3336 * this_sack_lowest_n
3339 if (*this_sack_lowest_newack == 0) {
3340 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3341 sctp_log_sack(*this_sack_lowest_newack,
3343 tp1->rec.data.TSN_seq,
3346 SCTP_LOG_TSN_ACKED);
3348 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
3353 * (rtx-)pseudo-cumac
3358 * (rtx-)pseudo-cumac
3360 * new_(rtx_)pseudo_c
3368 * (rtx-)pseudo-cumac
3376 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3377 if (tp1->rec.data.chunk_was_revoked == 0) {
3378 tp1->whoTo->new_pseudo_cumack = 1;
3380 tp1->whoTo->find_pseudo_cumack = 1;
3382 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3383 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3385 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3386 if (tp1->rec.data.chunk_was_revoked == 0) {
3387 tp1->whoTo->new_pseudo_cumack = 1;
3389 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3392 sctp_log_sack(*biggest_newly_acked_tsn,
3394 tp1->rec.data.TSN_seq,
3397 SCTP_LOG_TSN_ACKED);
3399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3400 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3401 tp1->whoTo->flight_size,
3403 (uintptr_t) tp1->whoTo,
3404 tp1->rec.data.TSN_seq);
/* Take the chunk out of flight and credit the destination; first
 * transmissions (snd_count < 2) also feed net_ack2 and an RTO sample. */
3406 sctp_flight_size_decrease(tp1);
3407 sctp_total_flight_decrease(stcb, tp1);
3409 tp1->whoTo->net_ack += tp1->send_size;
3410 if (tp1->snd_count < 2) {
3416 tp1->whoTo->net_ack2 += tp1->send_size;
3423 sctp_calculate_rto(stcb,
3426 &tp1->sent_rcv_time,
3427 sctp_align_safe_nocopy);
3432 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3433 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3434 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3435 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3436 asoc->this_sack_highest_gap,
3438 asoc->this_sack_highest_gap =
3439 tp1->rec.data.TSN_seq;
3441 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3442 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3443 #ifdef SCTP_AUDITING_ENABLED
3444 sctp_audit_log(0xB2,
3445 (asoc->sent_queue_retran_cnt & 0x000000ff));
3450 * All chunks NOT UNSENT
3451 * fall through here and are
3452 * marked (leave PR-SCTP
3453 * ones that are to skip
3456 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3457 tp1->sent = SCTP_DATAGRAM_MARKED;
3459 if (tp1->rec.data.chunk_was_revoked) {
3460 /* deflate the cwnd */
3461 tp1->whoTo->cwnd -= tp1->book_size;
3462 tp1->rec.data.chunk_was_revoked = 0;
3466 } /* if (tp1->TSN_seq == theTSN) */
3467 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3471 tp1 = TAILQ_NEXT(tp1, sctp_next);
3472 } /* end while (tp1) */
3473 } /* end for (j = fragStart */
/* Fetch the next gap-ack block and advance the parse offset. */
3474 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3475 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3476 *offset += sizeof(block);
3481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3483 sctp_log_fr(*biggest_tsn_acked,
3484 *biggest_newly_acked_tsn,
3485 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
/*
 * sctp_check_for_revoked() - after processing a SACK, walk the
 * sent_queue above the new cum-ack.  Any chunk still marked
 * SCTP_DATAGRAM_ACKED (acked by an earlier SACK) that this SACK's gap
 * blocks no longer cover has been *revoked* by the peer: put it back
 * to SENT, re-add it to the flight accounting, and inflate cwnd to
 * compensate.  Chunks MARKED by this SACK are promoted to ACKED.  If
 * anything was revoked, reset the ECN-nonce resync point, since
 * retransmissions will not carry the nonce bits.
 *
 * NOTE(review): truncated listing - interior lines (including the
 * loop's "break" arm and some closing braces) are missing and the
 * leading numerals are extraction artifacts.
 */
3490 sctp_check_for_revoked(struct sctp_tcb *stcb,
3491 struct sctp_association *asoc, uint32_t cumack,
3492 u_long biggest_tsn_acked)
3494 struct sctp_tmit_chunk *tp1;
3495 int tot_revoked = 0;
3497 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3499 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3502 * ok this guy is either ACK or MARKED. If it is
3503 * ACKED it has been previously acked but not this
3504 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Nothing beyond biggest_tsn_acked can have been touched by this
 * SACK, so the walk can stop there. */
3507 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3512 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3513 /* it has been revoked */
3514 tp1->sent = SCTP_DATAGRAM_SENT;
3515 tp1->rec.data.chunk_was_revoked = 1;
3517 * We must add this stuff back in to assure
3518 * timers and such get started.
3520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3521 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3522 tp1->whoTo->flight_size,
3524 (uintptr_t) tp1->whoTo,
3525 tp1->rec.data.TSN_seq);
3527 sctp_flight_size_increase(tp1);
3528 sctp_total_flight_increase(stcb, tp1);
3530 * We inflate the cwnd to compensate for our
3531 * artificial inflation of the flight_size.
3533 tp1->whoTo->cwnd += tp1->book_size;
3535 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3536 sctp_log_sack(asoc->last_acked_seq,
3538 tp1->rec.data.TSN_seq,
3541 SCTP_LOG_TSN_REVOKED);
3543 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3544 /* it has been re-acked in this SACK */
3545 tp1->sent = SCTP_DATAGRAM_ACKED;
3548 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3550 tp1 = TAILQ_NEXT(tp1, sctp_next);
3552 if (tot_revoked > 0) {
3554 * Setup the ecn nonce re-sync point. We do this since once
3555 * data is revoked we begin to retransmit things, which do
3556 * NOT have the ECN bits set. This means we are now out of
3557 * sync and must wait until we get back in sync with the
3558 * peer to check ECN bits.
/* Resync at the head of the send_queue, or at the next TSN to be
 * assigned (sending_seq) when the send_queue is empty. */
3560 tp1 = TAILQ_FIRST(&asoc->send_queue);
3562 asoc->nonce_resync_tsn = asoc->sending_seq;
3564 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3566 asoc->nonce_wait_for_ecne = 0;
3567 asoc->nonce_sum_check = 0;
/*
 * sctp_strike_gap_ack_chunks():
 * Core fast-retransmit marking pass.  Walks the sent_queue and, for
 * each chunk below this_sack_highest_gap that was NOT acked by this
 * SACK, increments its "strike" count (SCTP's dup-ack analogue).  A
 * chunk that reaches SCTP_DATAGRAM_RESEND is pulled out of flight,
 * credited back to the peer's rwnd, moved to an alternate destination
 * (policy depends on CMT / CMT-PF / SCTP_FR_TO_ALTERNATE), and flagged
 * as doing fast retransmit.  Implements the CMT SFR, DAC and HTNA
 * algorithms, plus PR-SCTP TTL/RTX-limit based abandonment.
 *
 * NOTE(review): interior lines (while-loop header, braces, parts of
 * conditions) are missing from this extract; code lines below are
 * kept verbatim.
 */
3573 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3574 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3576 struct sctp_tmit_chunk *tp1;
3577 int strike_flag = 0;
3579 int tot_retrans = 0;
3580 uint32_t sending_seq;
3581 struct sctp_nets *net;
3582 int num_dests_sacked = 0;
3585 * select the sending_seq, this is either the next thing ready to be
3586 * sent but not transmitted, OR, the next seq we assign.
3588 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3590 sending_seq = asoc->sending_seq;
3592 sending_seq = tp1->rec.data.TSN_seq;
3595 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3596 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3597 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
/* Count destinations that saw a new ack -- "mixed" SACK detection. */
3598 if (net->saw_newack)
3602 if (stcb->asoc.peer_supports_prsctp) {
3603 (void)SCTP_GETTIME_TIMEVAL(&now);
/* Main sent_queue walk: decide per chunk whether to strike it. */
3605 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3608 if (tp1->no_fr_allowed) {
3609 /* this one had a timeout or something */
3610 tp1 = TAILQ_NEXT(tp1, sctp_next);
3613 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3614 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3615 sctp_log_fr(biggest_tsn_newly_acked,
3616 tp1->rec.data.TSN_seq,
3618 SCTP_FR_LOG_CHECK_STRIKE);
/* Stop at chunks above the biggest acked TSN, or at unsent data. */
3620 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3622 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3626 if (stcb->asoc.peer_supports_prsctp) {
3627 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3628 /* Is it expired? */
3631 * TODO sctp_constants.h needs alternative
3632 * time macros when _KERNEL is undefined.
3634 (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3636 /* Yes so drop it */
3637 if (tp1->data != NULL) {
3638 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3639 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3640 SCTP_SO_NOT_LOCKED);
3642 tp1 = TAILQ_NEXT(tp1, sctp_next);
3647 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3648 asoc->this_sack_highest_gap, MAX_TSN)) {
3649 /* we are beyond the tsn in the sack */
3652 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3653 /* either a RESEND, ACKED, or MARKED */
3655 tp1 = TAILQ_NEXT(tp1, sctp_next);
3659 * CMT : SFR algo (covers part of DAC and HTNA as well)
3661 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3663 * No new acks were receieved for data sent to this
3664 * dest. Therefore, according to the SFR algo for
3665 * CMT, no data sent to this dest can be marked for
3666 * FR using this SACK.
3668 tp1 = TAILQ_NEXT(tp1, sctp_next);
3670 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3671 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3673 * CMT: New acks were receieved for data sent to
3674 * this dest. But no new acks were seen for data
3675 * sent after tp1. Therefore, according to the SFR
3676 * algo for CMT, tp1 cannot be marked for FR using
3677 * this SACK. This step covers part of the DAC algo
3678 * and the HTNA algo as well.
3680 tp1 = TAILQ_NEXT(tp1, sctp_next);
3684 * Here we check to see if we were have already done a FR
3685 * and if so we see if the biggest TSN we saw in the sack is
3686 * smaller than the recovery point. If so we don't strike
3687 * the tsn... otherwise we CAN strike the TSN.
3690 * @@@ JRI: Check for CMT if (accum_moved &&
3691 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
/* Case 1: in fast recovery and the cum-ack moved -- strike. */
3694 if (accum_moved && asoc->fast_retran_loss_recovery) {
3696 * Strike the TSN if in fast-recovery and cum-ack
3699 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3700 sctp_log_fr(biggest_tsn_newly_acked,
3701 tp1->rec.data.TSN_seq,
3703 SCTP_FR_LOG_STRIKE_CHUNK);
3705 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3708 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3710 * CMT DAC algorithm: If SACK flag is set to
3711 * 0, then lowest_newack test will not pass
3712 * because it would have been set to the
3713 * cumack earlier. If not already to be
3714 * rtx'd, If not a mixed sack and if tp1 is
3715 * not between two sacked TSNs, then mark by
3716 * one more. NOTE that we are marking by one
3717 * additional time since the SACK DAC flag
3718 * indicates that two packets have been
3719 * received after this missing TSN.
3721 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3722 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3724 sctp_log_fr(16 + num_dests_sacked,
3725 tp1->rec.data.TSN_seq,
3727 SCTP_FR_LOG_STRIKE_CHUNK);
/* Case 2: chunk already fast-retransmitted once (non-CMT path). */
3732 } else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3734 * For those that have done a FR we must take
3735 * special consideration if we strike. I.e the
3736 * biggest_newly_acked must be higher than the
3737 * sending_seq at the time we did the FR.
3740 #ifdef SCTP_FR_TO_ALTERNATE
3742 * If FR's go to new networks, then we must only do
3743 * this for singly homed asoc's. However if the FR's
3744 * go to the same network (Armando's work) then its
3745 * ok to FR multiple times.
3753 if ((compare_with_wrap(biggest_tsn_newly_acked,
3754 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3755 (biggest_tsn_newly_acked ==
3756 tp1->rec.data.fast_retran_tsn)) {
3758 * Strike the TSN, since this ack is
3759 * beyond where things were when we
3762 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3763 sctp_log_fr(biggest_tsn_newly_acked,
3764 tp1->rec.data.TSN_seq,
3766 SCTP_FR_LOG_STRIKE_CHUNK);
3768 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3772 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3774 * CMT DAC algorithm: If
3775 * SACK flag is set to 0,
3776 * then lowest_newack test
3777 * will not pass because it
3778 * would have been set to
3779 * the cumack earlier. If
3780 * not already to be rtx'd,
3781 * If not a mixed sack and
3782 * if tp1 is not between two
3783 * sacked TSNs, then mark by
3784 * one more. NOTE that we
3785 * are marking by one
3786 * additional time since the
3787 * SACK DAC flag indicates
3788 * that two packets have
3789 * been received after this
3792 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3793 (num_dests_sacked == 1) &&
3794 compare_with_wrap(this_sack_lowest_newack,
3795 tp1->rec.data.TSN_seq, MAX_TSN)) {
3796 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3797 sctp_log_fr(32 + num_dests_sacked,
3798 tp1->rec.data.TSN_seq,
3800 SCTP_FR_LOG_STRIKE_CHUNK);
3802 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3810 * JRI: TODO: remove code for HTNA algo. CMT's SFR
/* Case 3: HTNA -- never strike TSNs above the highest newly acked. */
3813 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3814 biggest_tsn_newly_acked, MAX_TSN)) {
3816 * We don't strike these: This is the HTNA
3817 * algorithm i.e. we don't strike If our TSN is
3818 * larger than the Highest TSN Newly Acked.
/* Case 4 (default): ordinary strike. */
3822 /* Strike the TSN */
3823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3824 sctp_log_fr(biggest_tsn_newly_acked,
3825 tp1->rec.data.TSN_seq,
3827 SCTP_FR_LOG_STRIKE_CHUNK);
3829 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3832 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3834 * CMT DAC algorithm: If SACK flag is set to
3835 * 0, then lowest_newack test will not pass
3836 * because it would have been set to the
3837 * cumack earlier. If not already to be
3838 * rtx'd, If not a mixed sack and if tp1 is
3839 * not between two sacked TSNs, then mark by
3840 * one more. NOTE that we are marking by one
3841 * additional time since the SACK DAC flag
3842 * indicates that two packets have been
3843 * received after this missing TSN.
3845 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3846 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3848 sctp_log_fr(48 + num_dests_sacked,
3849 tp1->rec.data.TSN_seq,
3851 SCTP_FR_LOG_STRIKE_CHUNK);
/*
 * Chunk just crossed the strike threshold into RESEND state:
 * remove from flight, credit rwnd back, pick a destination, and
 * flag fast retransmit.
 */
3857 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3858 struct sctp_nets *alt;
3860 /* fix counts and things */
3861 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3862 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3863 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3865 (uintptr_t) tp1->whoTo,
3866 tp1->rec.data.TSN_seq);
3869 tp1->whoTo->net_ack++;
3870 sctp_flight_size_decrease(tp1);
3872 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3873 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3874 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3876 /* add back to the rwnd */
3877 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3879 /* remove from the total flight */
3880 sctp_total_flight_decrease(stcb, tp1);
/* PR-SCTP RTX policy: abandon after tv_sec retransmissions. */
3882 if ((stcb->asoc.peer_supports_prsctp) &&
3883 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3885 * Has it been retransmitted tv_sec times? -
3886 * we store the retran count there.
3888 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3889 /* Yes, so drop it */
3890 if (tp1->data != NULL) {
3891 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3892 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3893 SCTP_SO_NOT_LOCKED);
3895 /* Make sure to flag we had a FR */
3896 tp1->whoTo->net_ack++;
3897 tp1 = TAILQ_NEXT(tp1, sctp_next);
3901 /* printf("OK, we are now ready to FR this guy\n"); */
3902 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3903 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3907 /* This is a subsequent FR */
3908 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3910 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
/* Destination selection for the retransmission. */
3911 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3913 * CMT: Using RTX_SSTHRESH policy for CMT.
3914 * If CMT is being used, then pick dest with
3915 * largest ssthresh for any retransmission.
3917 tp1->no_fr_allowed = 1;
3919 /* sa_ignore NO_NULL_CHK */
3920 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3922 * JRS 5/18/07 - If CMT PF is on,
3923 * use the PF version of
3926 alt = sctp_find_alternate_net(stcb, alt, 2);
3929 * JRS 5/18/07 - If only CMT is on,
3930 * use the CMT version of
3933 /* sa_ignore NO_NULL_CHK */
3934 alt = sctp_find_alternate_net(stcb, alt, 1);
3940 * CUCv2: If a different dest is picked for
3941 * the retransmission, then new
3942 * (rtx-)pseudo_cumack needs to be tracked
3943 * for orig dest. Let CUCv2 track new (rtx-)
3944 * pseudo-cumack always.
3947 tp1->whoTo->find_pseudo_cumack = 1;
3948 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3950 } else {/* CMT is OFF */
3952 #ifdef SCTP_FR_TO_ALTERNATE
3953 /* Can we find an alternate? */
3954 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3957 * default behavior is to NOT retransmit
3958 * FR's to an alternate. Armando Caro's
3959 * paper details why.
3965 tp1->rec.data.doing_fast_retransmit = 1;
3967 /* mark the sending seq for possible subsequent FR's */
3969 * printf("Marking TSN for FR new value %x\n",
3970 * (uint32_t)tpi->rec.data.TSN_seq);
3972 if (TAILQ_EMPTY(&asoc->send_queue)) {
3974 * If the queue of send is empty then its
3975 * the next sequence number that will be
3976 * assigned so we subtract one from this to
3977 * get the one we last sent.
3979 tp1->rec.data.fast_retran_tsn = sending_seq;
3982 * If there are chunks on the send queue
3983 * (unsent data that has made it from the
3984 * stream queues but not out the door, we
3985 * take the first one (which will have the
3986 * lowest TSN) and subtract one to get the
3989 struct sctp_tmit_chunk *ttt;
3991 ttt = TAILQ_FIRST(&asoc->send_queue);
3992 tp1->rec.data.fast_retran_tsn =
3993 ttt->rec.data.TSN_seq;
3998 * this guy had a RTO calculation pending on
/* Re-home the chunk if a different destination was chosen. */
4003 if (alt != tp1->whoTo) {
4004 /* yes, there is an alternate. */
4005 sctp_free_remote_addr(tp1->whoTo);
4006 /* sa_ignore FREED_MEMORY */
4008 atomic_add_int(&alt->ref_count, 1);
4011 tp1 = TAILQ_NEXT(tp1, sctp_next);
4014 if (tot_retrans > 0) {
4016 * Setup the ecn nonce re-sync point. We do this since once
4017 * we go to FR something we introduce a Karn's rule scenario
4018 * and won't know the totals for the ECN bits.
4020 asoc->nonce_resync_tsn = sending_seq;
4021 asoc->nonce_wait_for_ecne = 0;
4022 asoc->nonce_sum_check = 0;
/*
 * sctp_try_advance_peer_ack_point():
 * PR-SCTP: walk the head of the sent_queue and advance
 * asoc->advanced_peer_ack_point over chunks that have been abandoned
 * (SCTP_FORWARD_TSN_SKIP) or whose PR-SCTP TTL has expired while
 * marked RESEND.  Stops at the first reliable (non-PR) chunk or at a
 * RESEND chunk whose lifetime has not yet expired.  Returns the last
 * chunk covered by the advanced ack point (a_adv), which the caller
 * uses when deciding whether to emit a FORWARD-TSN.
 *
 * NOTE(review): interior lines (loop header, braces, return paths) are
 * missing from this extract; the code lines below are kept verbatim.
 */
4026 struct sctp_tmit_chunk *
4027 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
4028 struct sctp_association *asoc)
4030 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* No PR-SCTP support on the peer: nothing can ever be skipped. */
4034 if (asoc->peer_supports_prsctp == 0) {
4037 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only SKIP-marked or RESEND chunks can let the ack point advance. */
4039 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
4040 tp1->sent != SCTP_DATAGRAM_RESEND) {
4041 /* no chance to advance, out of here */
4044 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4045 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4046 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4047 asoc->advanced_peer_ack_point,
4048 tp1->rec.data.TSN_seq, 0, 0);
4051 if (!PR_SCTP_ENABLED(tp1->flags)) {
4053 * We can't fwd-tsn past any that are reliable aka
4054 * retransmitted until the asoc fails.
4059 (void)SCTP_GETTIME_TIMEVAL(&now);
/* Remember the successor before tp1 may be released below. */
4062 tp2 = TAILQ_NEXT(tp1, sctp_next);
4064 * now we got a chunk which is marked for another
4065 * retransmission to a PR-stream but has run out its chances
4066 * already maybe OR has been marked to skip now. Can we skip
4067 * it if its a resend?
4069 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
4070 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
4072 * Now is this one marked for resend and its time is
4075 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
4076 /* Yes so drop it */
4078 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
4079 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
4080 SCTP_SO_NOT_LOCKED);
4084 * No, we are done when hit one for resend
4085 * whos time as not expired.
4091 * Ok now if this chunk is marked to drop it we can clean up
4092 * the chunk, advance our peer ack point and we can check
4095 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4096 /* advance PeerAckPoint goes forward */
4097 if (compare_with_wrap(tp1->rec.data.TSN_seq,
4098 asoc->advanced_peer_ack_point,
4101 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
4103 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
4104 /* No update but we do save the chk */
4109 * If it is still in RESEND we can advance no
4115 * If we hit here we just dumped tp1, move to next tsn on
/*
 * sctp_fs_audit():
 * Sanity audit of the flight-size bookkeeping: classify every chunk on
 * the sent_queue by its ->sent state (in flight, RESEND, in-between,
 * ACKED, above ACKED) and complain -- via panic or printf depending on
 * build -- when chunks still count as in flight or in-between even
 * though the caller observed total_flight as empty/inconsistent.  The
 * return value tells the caller whether a rebuild of the flight
 * counters is needed (see the caller in the express SACK path).
 *
 * NOTE(review): interior lines (return statements, braces) are missing
 * from this extract; the code lines below are kept verbatim.
 */
4124 sctp_fs_audit(struct sctp_association *asoc)
4126 struct sctp_tmit_chunk *chk;
4127 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4128 int entry_flight, entry_cnt, ret;
4130 entry_flight = asoc->total_flight;
4131 entry_cnt = asoc->total_flight_count;
/* All-PR-SCTP queues are exempt from the audit. */
4134 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Tally each sent_queue chunk into a state bucket. */
4137 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4138 if (chk->sent < SCTP_DATAGRAM_RESEND) {
4139 printf("Chk TSN:%u size:%d inflight cnt:%d\n",
4140 chk->rec.data.TSN_seq,
4145 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4147 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4149 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in flight here is an accounting bug. */
4156 if ((inflight > 0) || (inbetween > 0)) {
4158 panic("Flight size-express incorrect? \n");
4160 printf("asoc->total_flight:%d cnt:%d\n",
4161 entry_flight, entry_cnt);
4163 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
4164 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery():
 * Clear the window-probe flag on tp1 and, unless the chunk was already
 * ACKED (or its data is gone), move it back to RESEND state: shrink
 * per-net and total flight size, mark SCTP_DATAGRAM_RESEND and bump
 * the retransmit count so the probe data is re-sent now that the
 * peer's window has reopened.  Called from the SACK paths when a
 * window-probe recovery is detected.
 *
 * NOTE(review): some interior lines (braces/returns) are missing from
 * this extract; the code lines below are kept verbatim.
 */
4173 sctp_window_probe_recovery(struct sctp_tcb *stcb,
4174 struct sctp_association *asoc,
4175 struct sctp_nets *net,
4176 struct sctp_tmit_chunk *tp1)
4178 tp1->window_probe = 0;
4179 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
4180 /* TSN's skipped we do NOT move back. */
4181 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
4182 tp1->whoTo->flight_size,
4184 (uintptr_t) tp1->whoTo,
4185 tp1->rec.data.TSN_seq);
4188 /* First setup this by shrinking flight */
4189 sctp_flight_size_decrease(tp1);
4190 sctp_total_flight_decrease(stcb, tp1);
4191 /* Now mark for resend */
4192 tp1->sent = SCTP_DATAGRAM_RESEND;
4193 asoc->sent_queue_retran_cnt++;
4194 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4195 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4196 tp1->whoTo->flight_size,
4198 (uintptr_t) tp1->whoTo,
4199 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack():
 * Fast path for a SACK that carries NO gap-ack blocks -- only a new
 * cumulative ack and an rwnd.  Frees every sent_queue chunk at or
 * below cumack, updates flight size, RTO, congestion control, the
 * peer's rwnd (with SWS avoidance), ECN-nonce state, window-probe
 * recovery, retransmission timers, shutdown progression, and PR-SCTP
 * advanced-peer-ack-point / FORWARD-TSN emission.  Sets *abort_now
 * when the association must be aborted (impossible cum-ack, or
 * shutdown with a partial message left).
 *
 * NOTE(review): this extract is missing many interior lines (loop
 * headers, braces, else-arms, some declarations such as old_rwnd /
 * send_s / oper); the code lines below are kept verbatim.
 */
4204 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4205 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4207 struct sctp_nets *net;
4208 struct sctp_association *asoc;
4209 struct sctp_tmit_chunk *tp1, *tp2;
4211 int win_probe_recovery = 0;
4212 int win_probe_recovered = 0;
4213 int j, done_once = 0;
4215 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4216 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4217 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4219 SCTP_TCB_LOCK_ASSERT(stcb);
4220 #ifdef SCTP_ASOCLOG_OF_TSNS
4221 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4222 stcb->asoc.cumack_log_at++;
4223 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4224 stcb->asoc.cumack_log_at = 0;
/* Old cum-ack: at most a window update, no chunk processing. */
4228 old_rwnd = asoc->peers_rwnd;
4229 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4232 } else if (asoc->last_acked_seq == cumack) {
4233 /* Window update sack */
4234 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4235 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4236 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4237 /* SWS sender side engages */
4238 asoc->peers_rwnd = 0;
4240 if (asoc->peers_rwnd > old_rwnd) {
4245 /* First setup for CC stuff */
4246 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4247 net->prev_cwnd = net->cwnd;
4252 * CMT: Reset CUC and Fast recovery algo variables before
4255 net->new_pseudo_cumack = 0;
4256 net->will_exit_fast_recovery = 0;
/* Strict mode: abort if the cum-ack covers TSNs we never sent. */
4258 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4261 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4262 tp1 = TAILQ_LAST(&asoc->sent_queue,
4263 sctpchunk_listhead);
4264 send_s = tp1->rec.data.TSN_seq + 1;
4266 send_s = asoc->sending_seq;
4268 if ((cumack == send_s) ||
4269 compare_with_wrap(cumack, send_s, MAX_TSN)) {
4275 panic("Impossible sack 1");
4279 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4280 0, M_DONTWAIT, 1, MT_DATA);
4282 struct sctp_paramhdr *ph;
4285 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4287 ph = mtod(oper, struct sctp_paramhdr *);
4288 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4289 ph->param_length = htons(SCTP_BUF_LEN(oper));
4290 ippp = (uint32_t *) (ph + 1);
4291 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4293 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4294 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4299 asoc->this_sack_highest_gap = cumack;
4300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4301 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4302 stcb->asoc.overall_error_count,
4304 SCTP_FROM_SCTP_INDATA,
/* A valid SACK is proof of life: clear the error counter. */
4307 stcb->asoc.overall_error_count = 0;
/* New cum-ack: ack and free every covered sent_queue chunk. */
4308 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4309 /* process the new consecutive TSN first */
4310 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4312 tp2 = TAILQ_NEXT(tp1, sctp_next);
4313 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4315 cumack == tp1->rec.data.TSN_seq) {
4316 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4317 printf("Warning, an unsent is now acked?\n");
4320 * ECN Nonce: Add the nonce to the sender's
4323 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4324 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4326 * If it is less than ACKED, it is
4327 * now no-longer in flight. Higher
4328 * values may occur during marking
4330 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4332 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4333 tp1->whoTo->flight_size,
4335 (uintptr_t) tp1->whoTo,
4336 tp1->rec.data.TSN_seq);
4338 sctp_flight_size_decrease(tp1);
4339 /* sa_ignore NO_NULL_CHK */
4340 sctp_total_flight_decrease(stcb, tp1);
4342 tp1->whoTo->net_ack += tp1->send_size;
4343 if (tp1->snd_count < 2) {
4345 * True non-retransmited
4348 tp1->whoTo->net_ack2 +=
4351 /* update RTO too? */
/* RTO only from chunks never retransmitted (Karn's rule). */
4358 sctp_calculate_rto(stcb,
4360 &tp1->sent_rcv_time,
4361 sctp_align_safe_nocopy);
4366 * CMT: CUCv2 algorithm. From the
4367 * cumack'd TSNs, for each TSN being
4368 * acked for the first time, set the
4369 * following variables for the
4370 * corresp destination.
4371 * new_pseudo_cumack will trigger a
4373 * find_(rtx_)pseudo_cumack will
4374 * trigger search for the next
4375 * expected (rtx-)pseudo-cumack.
4377 tp1->whoTo->new_pseudo_cumack = 1;
4378 tp1->whoTo->find_pseudo_cumack = 1;
4379 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4381 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4382 /* sa_ignore NO_NULL_CHK */
4383 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4386 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4387 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4389 if (tp1->rec.data.chunk_was_revoked) {
4390 /* deflate the cwnd */
4391 tp1->whoTo->cwnd -= tp1->book_size;
4392 tp1->rec.data.chunk_was_revoked = 0;
/* Chunk fully acked: unlink it and release its mbufs. */
4394 tp1->sent = SCTP_DATAGRAM_ACKED;
4395 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4397 /* sa_ignore NO_NULL_CHK */
4398 sctp_free_bufspace(stcb, asoc, tp1, 1);
4399 sctp_m_freem(tp1->data);
4401 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4402 sctp_log_sack(asoc->last_acked_seq,
4404 tp1->rec.data.TSN_seq,
4407 SCTP_LOG_FREE_SENT);
4410 asoc->sent_queue_cnt--;
4411 sctp_free_a_chunk(stcb, tp1);
/* Send-buffer space was freed: wake any blocked writer. */
4419 /* sa_ignore NO_NULL_CHK */
4420 if (stcb->sctp_socket) {
4421 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4426 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4427 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4428 /* sa_ignore NO_NULL_CHK */
4429 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4431 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4432 so = SCTP_INP_SO(stcb->sctp_ep);
4433 atomic_add_int(&stcb->asoc.refcnt, 1);
4434 SCTP_TCB_UNLOCK(stcb);
4435 SCTP_SOCKET_LOCK(so, 1);
4436 SCTP_TCB_LOCK(stcb);
4437 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4438 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4439 /* assoc was freed while we were unlocked */
4440 SCTP_SOCKET_UNLOCK(so, 1);
4444 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4445 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4446 SCTP_SOCKET_UNLOCK(so, 1);
4449 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4450 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4454 /* JRS - Use the congestion control given in the CC module */
4455 if (asoc->last_acked_seq != cumack)
4456 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4458 asoc->last_acked_seq = cumack;
4460 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4461 /* nothing left in-flight */
4462 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4463 net->flight_size = 0;
4464 net->partial_bytes_acked = 0;
4466 asoc->total_flight = 0;
4467 asoc->total_flight_count = 0;
4469 /* ECN Nonce updates */
4470 if (asoc->ecn_nonce_allowed) {
4471 if (asoc->nonce_sum_check) {
/* Nonce mismatch: wait one window before declaring misbehavior. */
4472 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4473 if (asoc->nonce_wait_for_ecne == 0) {
4474 struct sctp_tmit_chunk *lchk;
4476 lchk = TAILQ_FIRST(&asoc->send_queue);
4477 asoc->nonce_wait_for_ecne = 1;
4479 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4481 asoc->nonce_wait_tsn = asoc->sending_seq;
4484 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4485 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4487 * Misbehaving peer. We need
4488 * to react to this guy
4490 asoc->ecn_allowed = 0;
4491 asoc->ecn_nonce_allowed = 0;
4496 /* See if Resynchronization Possible */
4497 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4498 asoc->nonce_sum_check = 1;
4500 * now we must calculate what the base is.
4501 * We do this based on two things, we know
4502 * the total's for all the segments
4503 * gap-acked in the SACK (none), We also
4504 * know the SACK's nonce sum, its in
4505 * nonce_sum_flag. So we can build a truth
4506 * table to back-calculate the new value of
4507 * asoc->nonce_sum_expect_base:
4509 * SACK-flag-Value Seg-Sums Base 0 0 0
4513 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute peer rwnd from the SACK's a_rwnd minus outstanding data. */
4518 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4519 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4520 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4521 /* SWS sender side engages */
4522 asoc->peers_rwnd = 0;
4524 if (asoc->peers_rwnd > old_rwnd) {
4525 win_probe_recovery = 1;
4527 /* Now assure a timer where data is queued at */
4530 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
/* Window reopened: re-mark probe chunks for retransmission. */
4533 if (win_probe_recovery && (net->window_probe)) {
4534 win_probe_recovered = 1;
4536 * Find first chunk that was used with window probe
4537 * and clear the sent
4539 /* sa_ignore FREED_MEMORY */
4540 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4541 if (tp1->window_probe) {
4542 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4547 if (net->RTO == 0) {
4548 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4550 to_ticks = MSEC_TO_TICKS(net->RTO);
/* Per-net T3-rxt timer management based on remaining flight. */
4552 if (net->flight_size) {
4554 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4555 sctp_timeout_handler, &net->rxt_timer);
4556 if (net->window_probe) {
4557 net->window_probe = 0;
4560 if (net->window_probe) {
4562 * In window probes we must assure a timer
4563 * is still running there
4565 net->window_probe = 0;
4566 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4567 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4568 sctp_timeout_handler, &net->rxt_timer);
4570 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4571 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4573 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4575 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4576 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4577 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4578 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4579 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight accounting looks wrong: audit and, if needed, rebuild. */
4585 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4586 (asoc->sent_queue_retran_cnt == 0) &&
4587 (win_probe_recovered == 0) &&
4590 * huh, this should not happen unless all packets are
4591 * PR-SCTP and marked to skip of course.
4593 if (sctp_fs_audit(asoc)) {
4594 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4595 if (net->flight_size) {
4596 net->flight_size = 0;
4599 asoc->total_flight = 0;
4600 asoc->total_flight_count = 0;
4601 asoc->sent_queue_retran_cnt = 0;
4602 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4603 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4604 sctp_flight_size_increase(tp1);
4605 sctp_total_flight_increase(stcb, tp1);
4606 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4607 asoc->sent_queue_retran_cnt++;
4614 /**********************************/
4615 /* Now what about shutdown issues */
4616 /**********************************/
4617 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4618 /* nothing left on sendqueue.. consider done */
4620 if ((asoc->stream_queue_cnt == 1) &&
4621 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4622 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4623 (asoc->locked_on_sending)
4625 struct sctp_stream_queue_pending *sp;
4628 * I may be in a state where we got all across.. but
4629 * cannot write more due to a shutdown... we abort
4630 * since the user did not indicate EOR in this case.
4631 * The sp will be cleaned during free of the asoc.
4633 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4635 if ((sp) && (sp->length == 0)) {
4636 /* Let cleanup code purge it */
4637 if (sp->msg_is_complete) {
4638 asoc->stream_queue_cnt--;
4640 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4641 asoc->locked_on_sending = NULL;
4642 asoc->stream_queue_cnt--;
/* All data delivered during SHUTDOWN-PENDING: send SHUTDOWN. */
4646 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4647 (asoc->stream_queue_cnt == 0)) {
4648 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4649 /* Need to abort here */
4655 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4656 0, M_DONTWAIT, 1, MT_DATA);
4658 struct sctp_paramhdr *ph;
4661 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4663 ph = mtod(oper, struct sctp_paramhdr *);
4664 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4665 ph->param_length = htons(SCTP_BUF_LEN(oper));
4666 ippp = (uint32_t *) (ph + 1);
4667 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4669 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4670 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4672 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4673 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4674 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4676 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4677 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4678 sctp_stop_timers_for_shutdown(stcb);
4679 sctp_send_shutdown(stcb,
4680 stcb->asoc.primary_destination);
4681 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4682 stcb->sctp_ep, stcb, asoc->primary_destination);
4683 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4684 stcb->sctp_ep, stcb, asoc->primary_destination);
/* Peer already sent SHUTDOWN: answer with SHUTDOWN-ACK. */
4686 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4687 (asoc->stream_queue_cnt == 0)) {
4688 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4691 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4692 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4693 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4694 sctp_send_shutdown_ack(stcb,
4695 stcb->asoc.primary_destination);
4697 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4698 stcb->sctp_ep, stcb, asoc->primary_destination);
4701 /*********************************************/
4702 /* Here we perform PR-SCTP procedures */
4704 /*********************************************/
4705 /* C1. update advancedPeerAckPoint */
4706 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4707 asoc->advanced_peer_ack_point = cumack;
4709 /* PR-Sctp issues need to be addressed too */
4710 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4711 struct sctp_tmit_chunk *lchk;
4712 uint32_t old_adv_peer_ack_point;
4714 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4715 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4716 /* C3. See if we need to send a Fwd-TSN */
4717 if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4720 * ISSUE with ECN, see FWD-TSN processing for notes
4721 * on issues that will occur when the ECN NONCE
4722 * stuff is put into SCTP for cross checking.
4724 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4726 send_forward_tsn(stcb, asoc);
4728 * ECN Nonce: Disable Nonce Sum check when
4729 * FWD TSN is sent and store resync tsn
4731 asoc->nonce_sum_check = 0;
4732 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4734 /* try to FR fwd-tsn's that get lost too */
4735 lchk->rec.data.fwd_tsn_cnt++;
4736 if (lchk->rec.data.fwd_tsn_cnt > 3) {
4737 send_forward_tsn(stcb, asoc);
4738 lchk->rec.data.fwd_tsn_cnt = 0;
4743 /* Assure a timer is up */
4744 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4745 stcb->sctp_ep, stcb, lchk->whoTo);
4748 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4749 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4751 stcb->asoc.peers_rwnd,
4752 stcb->asoc.total_flight,
4753 stcb->asoc.total_output_queue_size);
4758 sctp_handle_sack(struct mbuf *m, int offset,
4759 struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4760 struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4762 struct sctp_association *asoc;
4763 struct sctp_sack *sack;
4764 struct sctp_tmit_chunk *tp1, *tp2;
4765 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4766 this_sack_lowest_newack;
4767 uint32_t sav_cum_ack;
4768 uint16_t num_seg, num_dup;
4769 uint16_t wake_him = 0;
4770 unsigned int sack_length;
4771 uint32_t send_s = 0;
4773 int accum_moved = 0;
4774 int will_exit_fast_recovery = 0;
4775 uint32_t a_rwnd, old_rwnd;
4776 int win_probe_recovery = 0;
4777 int win_probe_recovered = 0;
4778 struct sctp_nets *net = NULL;
4779 int nonce_sum_flag, ecn_seg_sums = 0;
4781 uint8_t reneged_all = 0;
4782 uint8_t cmt_dac_flag;
4785 * we take any chance we can to service our queues since we cannot
4786 * get awoken when the socket is read from :<
4789 * Now perform the actual SACK handling: 1) Verify that it is not an
4790 * old sack, if so discard. 2) If there is nothing left in the send
4791 * queue (cum-ack is equal to last acked) then you have a duplicate
4792 * too, update any rwnd change and verify no timers are running.
4793 * then return. 3) Process any new consequtive data i.e. cum-ack
4794 * moved process these first and note that it moved. 4) Process any
4795 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4796 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4797 * sync up flightsizes and things, stop all timers and also check
4798 * for shutdown_pending state. If so then go ahead and send off the
4799 * shutdown. If in shutdown recv, send off the shutdown-ack and
4800 * start that timer, Ret. 9) Strike any non-acked things and do FR
4801 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4802 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4803 * if in shutdown_recv state.
4805 SCTP_TCB_LOCK_ASSERT(stcb);
4808 this_sack_lowest_newack = 0;
4810 sack_length = (unsigned int)sack_len;
4812 SCTP_STAT_INCR(sctps_slowpath_sack);
4813 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4814 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4815 #ifdef SCTP_ASOCLOG_OF_TSNS
4816 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4817 stcb->asoc.cumack_log_at++;
4818 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4819 stcb->asoc.cumack_log_at = 0;
4822 num_seg = ntohs(sack->num_gap_ack_blks);
4826 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4827 num_dup = ntohs(sack->num_dup_tsns);
4829 old_rwnd = stcb->asoc.peers_rwnd;
4830 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4831 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4832 stcb->asoc.overall_error_count,
4834 SCTP_FROM_SCTP_INDATA,
4837 stcb->asoc.overall_error_count = 0;
4839 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4840 sctp_log_sack(asoc->last_acked_seq,
4847 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4848 int off_to_dup, iii;
4849 uint32_t *dupdata, dblock;
4851 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4852 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4853 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4854 sizeof(uint32_t), (uint8_t *) & dblock);
4855 off_to_dup += sizeof(uint32_t);
4857 for (iii = 0; iii < num_dup; iii++) {
4858 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4859 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4860 sizeof(uint32_t), (uint8_t *) & dblock);
4861 if (dupdata == NULL)
4863 off_to_dup += sizeof(uint32_t);
4867 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4868 off_to_dup, num_dup, sack_length, num_seg);
4871 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4873 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4874 tp1 = TAILQ_LAST(&asoc->sent_queue,
4875 sctpchunk_listhead);
4876 send_s = tp1->rec.data.TSN_seq + 1;
4878 send_s = asoc->sending_seq;
4880 if (cum_ack == send_s ||
4881 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4888 panic("Impossible sack 1");
4893 * no way, we have not even sent this TSN out yet.
4894 * Peer is hopelessly messed up with us.
4899 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4900 0, M_DONTWAIT, 1, MT_DATA);
4902 struct sctp_paramhdr *ph;
4905 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4907 ph = mtod(oper, struct sctp_paramhdr *);
4908 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4909 ph->param_length = htons(SCTP_BUF_LEN(oper));
4910 ippp = (uint32_t *) (ph + 1);
4911 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4913 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4914 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4919 /**********************/
4920 /* 1) check the range */
4921 /**********************/
4922 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4923 /* acking something behind */
4926 sav_cum_ack = asoc->last_acked_seq;
4928 /* update the Rwnd of the peer */
4929 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4930 TAILQ_EMPTY(&asoc->send_queue) &&
4931 (asoc->stream_queue_cnt == 0)
4933 /* nothing left on send/sent and strmq */
4934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4935 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4936 asoc->peers_rwnd, 0, 0, a_rwnd);
4938 asoc->peers_rwnd = a_rwnd;
4939 if (asoc->sent_queue_retran_cnt) {
4940 asoc->sent_queue_retran_cnt = 0;
4942 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4943 /* SWS sender side engages */
4944 asoc->peers_rwnd = 0;
4946 /* stop any timers */
4947 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4948 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4949 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4950 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4951 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4952 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4953 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4954 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4957 net->partial_bytes_acked = 0;
4958 net->flight_size = 0;
4960 asoc->total_flight = 0;
4961 asoc->total_flight_count = 0;
4965 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4966 * things. The total byte count acked is tracked in netAckSz AND
4967 * netAck2 is used to track the total bytes acked that are un-
4968 * amibguious and were never retransmitted. We track these on a per
4969 * destination address basis.
4971 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4972 net->prev_cwnd = net->cwnd;
4977 * CMT: Reset CUC and Fast recovery algo variables before
4980 net->new_pseudo_cumack = 0;
4981 net->will_exit_fast_recovery = 0;
4983 /* process the new consecutive TSN first */
4984 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4986 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4988 last_tsn == tp1->rec.data.TSN_seq) {
4989 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4991 * ECN Nonce: Add the nonce to the sender's
4994 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4996 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4998 * If it is less than ACKED, it is
4999 * now no-longer in flight. Higher
5000 * values may occur during marking
5002 if ((tp1->whoTo->dest_state &
5003 SCTP_ADDR_UNCONFIRMED) &&
5004 (tp1->snd_count < 2)) {
5006 * If there was no retran
5007 * and the address is
5008 * un-confirmed and we sent
5010 * sacked.. its confirmed,
5013 tp1->whoTo->dest_state &=
5014 ~SCTP_ADDR_UNCONFIRMED;
5016 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5017 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5018 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
5019 tp1->whoTo->flight_size,
5021 (uintptr_t) tp1->whoTo,
5022 tp1->rec.data.TSN_seq);
5024 sctp_flight_size_decrease(tp1);
5025 sctp_total_flight_decrease(stcb, tp1);
5027 tp1->whoTo->net_ack += tp1->send_size;
5029 /* CMT SFR and DAC algos */
5030 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
5031 tp1->whoTo->saw_newack = 1;
5033 if (tp1->snd_count < 2) {
5035 * True non-retransmited
5038 tp1->whoTo->net_ack2 +=
5041 /* update RTO too? */
5044 sctp_calculate_rto(stcb,
5046 &tp1->sent_rcv_time,
5047 sctp_align_safe_nocopy);
5052 * CMT: CUCv2 algorithm. From the
5053 * cumack'd TSNs, for each TSN being
5054 * acked for the first time, set the
5055 * following variables for the
5056 * corresp destination.
5057 * new_pseudo_cumack will trigger a
5059 * find_(rtx_)pseudo_cumack will
5060 * trigger search for the next
5061 * expected (rtx-)pseudo-cumack.
5063 tp1->whoTo->new_pseudo_cumack = 1;
5064 tp1->whoTo->find_pseudo_cumack = 1;
5065 tp1->whoTo->find_rtx_pseudo_cumack = 1;
5068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5069 sctp_log_sack(asoc->last_acked_seq,
5071 tp1->rec.data.TSN_seq,
5074 SCTP_LOG_TSN_ACKED);
5076 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
5077 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
5080 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5081 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
5082 #ifdef SCTP_AUDITING_ENABLED
5083 sctp_audit_log(0xB3,
5084 (asoc->sent_queue_retran_cnt & 0x000000ff));
5087 if (tp1->rec.data.chunk_was_revoked) {
5088 /* deflate the cwnd */
5089 tp1->whoTo->cwnd -= tp1->book_size;
5090 tp1->rec.data.chunk_was_revoked = 0;
5092 tp1->sent = SCTP_DATAGRAM_ACKED;
5097 tp1 = TAILQ_NEXT(tp1, sctp_next);
5099 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
5100 /* always set this up to cum-ack */
5101 asoc->this_sack_highest_gap = last_tsn;
5103 /* Move offset up to point to gaps/dups */
5104 offset += sizeof(struct sctp_sack_chunk);
5105 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
5107 /* skip corrupt segments */
5113 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
5114 * to be greater than the cumack. Also reset saw_newack to 0
5117 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5118 net->saw_newack = 0;
5119 net->this_sack_highest_newack = last_tsn;
5123 * thisSackHighestGap will increase while handling NEW
5124 * segments this_sack_highest_newack will increase while
5125 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
5126 * used for CMT DAC algo. saw_newack will also change.
5128 sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
5129 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
5130 num_seg, &ecn_seg_sums);
5132 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
5134 * validate the biggest_tsn_acked in the gap acks if
5135 * strict adherence is wanted.
5137 if ((biggest_tsn_acked == send_s) ||
5138 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
5140 * peer is either confused or we are under
5141 * attack. We must abort.
5148 /*******************************************/
5149 /* cancel ALL T3-send timer if accum moved */
5150 /*******************************************/
5151 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
5152 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5153 if (net->new_pseudo_cumack)
5154 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5156 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
5161 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5162 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5163 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
5167 /********************************************/
5168 /* drop the acked chunks from the sendqueue */
5169 /********************************************/
5170 asoc->last_acked_seq = cum_ack;
5172 tp1 = TAILQ_FIRST(&asoc->sent_queue);
5176 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
5180 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
5181 /* no more sent on list */
5182 printf("Warning, tp1->sent == %d and its now acked?\n",
5185 tp2 = TAILQ_NEXT(tp1, sctp_next);
5186 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
5187 if (tp1->pr_sctp_on) {
5188 if (asoc->pr_sctp_cnt != 0)
5189 asoc->pr_sctp_cnt--;
5191 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
5192 (asoc->total_flight > 0)) {
5194 panic("Warning flight size is postive and should be 0");
5196 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
5197 asoc->total_flight);
5199 asoc->total_flight = 0;
5202 /* sa_ignore NO_NULL_CHK */
5203 sctp_free_bufspace(stcb, asoc, tp1, 1);
5204 sctp_m_freem(tp1->data);
5205 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
5206 asoc->sent_queue_cnt_removeable--;
5209 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5210 sctp_log_sack(asoc->last_acked_seq,
5212 tp1->rec.data.TSN_seq,
5215 SCTP_LOG_FREE_SENT);
5218 asoc->sent_queue_cnt--;
5219 sctp_free_a_chunk(stcb, tp1);
5222 } while (tp1 != NULL);
5225 /* sa_ignore NO_NULL_CHK */
5226 if ((wake_him) && (stcb->sctp_socket)) {
5227 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5231 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
5232 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5233 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
5235 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5236 so = SCTP_INP_SO(stcb->sctp_ep);
5237 atomic_add_int(&stcb->asoc.refcnt, 1);
5238 SCTP_TCB_UNLOCK(stcb);
5239 SCTP_SOCKET_LOCK(so, 1);
5240 SCTP_TCB_LOCK(stcb);
5241 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5242 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5243 /* assoc was freed while we were unlocked */
5244 SCTP_SOCKET_UNLOCK(so, 1);
5248 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
5249 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5250 SCTP_SOCKET_UNLOCK(so, 1);
5253 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5254 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
5258 if (asoc->fast_retran_loss_recovery && accum_moved) {
5259 if (compare_with_wrap(asoc->last_acked_seq,
5260 asoc->fast_recovery_tsn, MAX_TSN) ||
5261 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
5262 /* Setup so we will exit RFC2582 fast recovery */
5263 will_exit_fast_recovery = 1;
5267 * Check for revoked fragments:
5269 * if Previous sack - Had no frags then we can't have any revoked if
5270 * Previous sack - Had frag's then - If we now have frags aka
5271 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
5272 * some of them. else - The peer revoked all ACKED fragments, since
5273 * we had some before and now we have NONE.
5277 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
5278 else if (asoc->saw_sack_with_frags) {
5279 int cnt_revoked = 0;
5281 tp1 = TAILQ_FIRST(&asoc->sent_queue);
5283 /* Peer revoked all dg's marked or acked */
5284 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5285 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
5286 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
5287 tp1->sent = SCTP_DATAGRAM_SENT;
5288 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5289 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
5290 tp1->whoTo->flight_size,
5292 (uintptr_t) tp1->whoTo,
5293 tp1->rec.data.TSN_seq);
5295 sctp_flight_size_increase(tp1);
5296 sctp_total_flight_increase(stcb, tp1);
5297 tp1->rec.data.chunk_was_revoked = 1;
5299 * To ensure that this increase in
5300 * flightsize, which is artificial,
5301 * does not throttle the sender, we
5302 * also increase the cwnd
5305 tp1->whoTo->cwnd += tp1->book_size;
5313 asoc->saw_sack_with_frags = 0;
5316 asoc->saw_sack_with_frags = 1;
5318 asoc->saw_sack_with_frags = 0;
5320 /* JRS - Use the congestion control given in the CC module */
5321 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5323 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5324 /* nothing left in-flight */
5325 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5326 /* stop all timers */
5327 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5328 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5329 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5330 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5331 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5334 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5335 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5336 net->flight_size = 0;
5337 net->partial_bytes_acked = 0;
5339 asoc->total_flight = 0;
5340 asoc->total_flight_count = 0;
5342 /**********************************/
5343 /* Now what about shutdown issues */
5344 /**********************************/
5345 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5346 /* nothing left on sendqueue.. consider done */
5347 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5348 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5349 asoc->peers_rwnd, 0, 0, a_rwnd);
5351 asoc->peers_rwnd = a_rwnd;
5352 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5353 /* SWS sender side engages */
5354 asoc->peers_rwnd = 0;
5357 if ((asoc->stream_queue_cnt == 1) &&
5358 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5359 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5360 (asoc->locked_on_sending)
5362 struct sctp_stream_queue_pending *sp;
5365 * I may be in a state where we got all across.. but
5366 * cannot write more due to a shutdown... we abort
5367 * since the user did not indicate EOR in this case.
5369 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5371 if ((sp) && (sp->length == 0)) {
5372 asoc->locked_on_sending = NULL;
5373 if (sp->msg_is_complete) {
5374 asoc->stream_queue_cnt--;
5376 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5377 asoc->stream_queue_cnt--;
5381 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5382 (asoc->stream_queue_cnt == 0)) {
5383 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5384 /* Need to abort here */
5390 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5391 0, M_DONTWAIT, 1, MT_DATA);
5393 struct sctp_paramhdr *ph;
5396 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5398 ph = mtod(oper, struct sctp_paramhdr *);
5399 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5400 ph->param_length = htons(SCTP_BUF_LEN(oper));
5401 ippp = (uint32_t *) (ph + 1);
5402 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5404 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5405 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5408 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5409 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5410 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5412 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5413 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5414 sctp_stop_timers_for_shutdown(stcb);
5415 sctp_send_shutdown(stcb,
5416 stcb->asoc.primary_destination);
5417 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5418 stcb->sctp_ep, stcb, asoc->primary_destination);
5419 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5420 stcb->sctp_ep, stcb, asoc->primary_destination);
5423 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5424 (asoc->stream_queue_cnt == 0)) {
5425 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5428 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5429 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5430 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5431 sctp_send_shutdown_ack(stcb,
5432 stcb->asoc.primary_destination);
5434 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5435 stcb->sctp_ep, stcb, asoc->primary_destination);
5440 * Now here we are going to recycle net_ack for a different use...
5443 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5448 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5449 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5450 * automatically ensure that.
5452 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5453 this_sack_lowest_newack = cum_ack;
5456 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5457 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5459 /* JRS - Use the congestion control given in the CC module */
5460 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5462 /******************************************************************
5463 * Here we do the stuff with ECN Nonce checking.
5464 * We basically check to see if the nonce sum flag was incorrect
5465 * or if resynchronization needs to be done. Also if we catch a
5466 * misbehaving receiver we give him the kick.
5467 ******************************************************************/
5469 if (asoc->ecn_nonce_allowed) {
5470 if (asoc->nonce_sum_check) {
5471 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5472 if (asoc->nonce_wait_for_ecne == 0) {
5473 struct sctp_tmit_chunk *lchk;
5475 lchk = TAILQ_FIRST(&asoc->send_queue);
5476 asoc->nonce_wait_for_ecne = 1;
5478 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5480 asoc->nonce_wait_tsn = asoc->sending_seq;
5483 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5484 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5486 * Misbehaving peer. We need
5487 * to react to this guy
5489 asoc->ecn_allowed = 0;
5490 asoc->ecn_nonce_allowed = 0;
5495 /* See if Resynchronization Possible */
5496 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5497 asoc->nonce_sum_check = 1;
5499 * now we must calculate what the base is.
5500 * We do this based on two things, we know
5501 * the total's for all the segments
5502 * gap-acked in the SACK, its stored in
5503 * ecn_seg_sums. We also know the SACK's
5504 * nonce sum, its in nonce_sum_flag. So we
5505 * can build a truth table to back-calculate
5507 * asoc->nonce_sum_expect_base:
5509 * SACK-flag-Value Seg-Sums Base 0 0 0
5513 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5517 /* Now are we exiting loss recovery ? */
5518 if (will_exit_fast_recovery) {
5519 /* Ok, we must exit fast recovery */
5520 asoc->fast_retran_loss_recovery = 0;
5522 if ((asoc->sat_t3_loss_recovery) &&
5523 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5525 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5526 /* end satellite t3 loss recovery */
5527 asoc->sat_t3_loss_recovery = 0;
5532 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5533 if (net->will_exit_fast_recovery) {
5534 /* Ok, we must exit fast recovery */
5535 net->fast_retran_loss_recovery = 0;
5539 /* Adjust and set the new rwnd value */
5540 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5541 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5542 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5544 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5545 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5546 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5547 /* SWS sender side engages */
5548 asoc->peers_rwnd = 0;
5550 if (asoc->peers_rwnd > old_rwnd) {
5551 win_probe_recovery = 1;
5554 * Now we must setup so we have a timer up for anyone with
5560 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5561 if (win_probe_recovery && (net->window_probe)) {
5562 win_probe_recovered = 1;
5564 * Find first chunk that was used with
5565 * window probe and clear the event. Put
5566 * it back into the send queue as if has
5569 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5570 if (tp1->window_probe) {
5571 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5576 if (net->flight_size) {
5578 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5579 stcb->sctp_ep, stcb, net);
5580 if (net->window_probe) {
5583 if (net->window_probe) {
5585 * In window probes we must assure a timer
5586 * is still running there
5589 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5590 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5591 stcb->sctp_ep, stcb, net);
5594 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5595 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5597 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5599 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5600 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5601 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5602 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5603 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5609 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5610 (asoc->sent_queue_retran_cnt == 0) &&
5611 (win_probe_recovered == 0) &&
5614 * huh, this should not happen unless all packets are
5615 * PR-SCTP and marked to skip of course.
5617 if (sctp_fs_audit(asoc)) {
5618 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5619 net->flight_size = 0;
5621 asoc->total_flight = 0;
5622 asoc->total_flight_count = 0;
5623 asoc->sent_queue_retran_cnt = 0;
5624 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5625 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5626 sctp_flight_size_increase(tp1);
5627 sctp_total_flight_increase(stcb, tp1);
5628 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5629 asoc->sent_queue_retran_cnt++;
5636 /* Fix up the a-p-a-p for future PR-SCTP sends */
5637 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5638 asoc->advanced_peer_ack_point = cum_ack;
5640 /* C2. try to further move advancedPeerAckPoint ahead */
5641 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5642 struct sctp_tmit_chunk *lchk;
5643 uint32_t old_adv_peer_ack_point;
5645 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5646 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5647 /* C3. See if we need to send a Fwd-TSN */
5648 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5651 * ISSUE with ECN, see FWD-TSN processing for notes
5652 * on issues that will occur when the ECN NONCE
5653 * stuff is put into SCTP for cross checking.
5655 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5656 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5657 0xee, cum_ack, asoc->advanced_peer_ack_point,
5658 old_adv_peer_ack_point);
5660 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5662 send_forward_tsn(stcb, asoc);
5664 * ECN Nonce: Disable Nonce Sum check when
5665 * FWD TSN is sent and store resync tsn
5667 asoc->nonce_sum_check = 0;
5668 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5670 /* try to FR fwd-tsn's that get lost too */
5671 lchk->rec.data.fwd_tsn_cnt++;
5672 if (lchk->rec.data.fwd_tsn_cnt > 3) {
5673 send_forward_tsn(stcb, asoc);
5674 lchk->rec.data.fwd_tsn_cnt = 0;
5679 /* Assure a timer is up */
5680 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5681 stcb->sctp_ep, stcb, lchk->whoTo);
5684 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5685 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5687 stcb->asoc.peers_rwnd,
5688 stcb->asoc.total_flight,
5689 stcb->asoc.total_output_queue_size);
5694 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5695 struct sctp_nets *netp, int *abort_flag)
5698 uint32_t cum_ack, a_rwnd;
5700 cum_ack = ntohl(cp->cumulative_tsn_ack);
5701 /* Arrange so a_rwnd does NOT change */
5702 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5704 /* Now call the express sack handling */
5705 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5709 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5710 struct sctp_stream_in *strmin)
5712 struct sctp_queued_to_read *ctl, *nctl;
5713 struct sctp_association *asoc;
5716 /* EY -used to calculate nr_gap information */
5717 uint32_t nr_tsn, nr_gap;
5720 tt = strmin->last_sequence_delivered;
5722 * First deliver anything prior to and including the stream no that
5725 ctl = TAILQ_FIRST(&strmin->inqueue);
5727 nctl = TAILQ_NEXT(ctl, next);
5728 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5729 (tt == ctl->sinfo_ssn)) {
5730 /* this is deliverable now */
5731 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5732 /* subtract pending on streams */
5733 asoc->size_on_all_streams -= ctl->length;
5734 sctp_ucount_decr(asoc->cnt_on_all_streams);
5735 /* deliver it to at least the delivery-q */
5736 if (stcb->sctp_socket) {
5737 /* EY need the tsn info for calculating nr */
5738 nr_tsn = ctl->sinfo_tsn;
5739 sctp_add_to_readq(stcb->sctp_ep, stcb,
5741 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5743 * EY this is the chunk that should be
5744 * tagged nr gapped calculate the gap and
5745 * such then tag this TSN nr
5746 * chk->rec.data.TSN_seq
5748 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5750 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
5751 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5752 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5754 * EY These should never
5755 * happen- explained before
5758 SCTP_TCB_LOCK_ASSERT(stcb);
5759 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5760 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
5761 if (compare_with_wrap(nr_tsn,
5762 asoc->highest_tsn_inside_nr_map,
5764 asoc->highest_tsn_inside_nr_map = nr_tsn;
5766 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
5769 * sctp_kick_prsctp_reorder_q
5770 * ueue(7): Something wrong,
5771 * the TSN to be tagged"
5772 * "\nas NR is not even in
5773 * the mapping_array, or map
5778 * EY - not %100 sure about
5779 * the lock thing, don't
5780 * think its required
5783 * SCTP_TCB_LOCK_ASSERT(stcb)
5788 * printf("\nCalculating an
5789 * nr_gap!!\nmapping_array_si
5791 * nr_mapping_array_size =
5792 * %d" "\nmapping_array_base
5794 * nr_mapping_array_base =
5795 * %d\nhighest_tsn_inside_map
5797 * "highest_tsn_inside_nr_map
5798 * = %d\nTSN = %d nr_gap =
5799 * %d",asoc->mapping_array_si
5801 * asoc->nr_mapping_array_siz
5803 * asoc->mapping_array_base_t
5805 * asoc->nr_mapping_array_bas
5807 * asoc->highest_tsn_inside_m
5809 * asoc->highest_tsn_inside_n
5810 * r_map,tsn,nr_gap);
5816 /* no more delivery now. */
5822 * now we must deliver things in queue the normal way if any are
5825 tt = strmin->last_sequence_delivered + 1;
5826 ctl = TAILQ_FIRST(&strmin->inqueue);
5828 nctl = TAILQ_NEXT(ctl, next);
5829 if (tt == ctl->sinfo_ssn) {
5830 /* this is deliverable now */
5831 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5832 /* subtract pending on streams */
5833 asoc->size_on_all_streams -= ctl->length;
5834 sctp_ucount_decr(asoc->cnt_on_all_streams);
5835 /* deliver it to at least the delivery-q */
5836 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5837 if (stcb->sctp_socket) {
5839 nr_tsn = ctl->sinfo_tsn;
5840 sctp_add_to_readq(stcb->sctp_ep, stcb,
5842 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5844 * EY this is the chunk that should be
5845 * tagged nr gapped calculate the gap and
5846 * such then tag this TSN nr
5847 * chk->rec.data.TSN_seq
5849 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5850 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
5851 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5852 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5854 * EY These should never
5855 * happen, explained before
5858 SCTP_TCB_LOCK_ASSERT(stcb);
5859 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5860 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
5861 if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
5863 asoc->highest_tsn_inside_nr_map = nr_tsn;
5865 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
5868 * sctp_kick_prsctp_reorder_q
5869 * ueue(8): Something wrong,
5870 * the TSN to be tagged"
5871 * "\nas NR is not even in
5872 * the mapping_array, or map
5877 * EY - not %100 sure about
5878 * the lock thing, don't
5879 * think its required
5882 * SCTP_TCB_LOCK_ASSERT(stcb)
5887 * printf("\nCalculating an
5888 * nr_gap!!\nmapping_array_si
5890 * nr_mapping_array_size =
5891 * %d" "\nmapping_array_base
5893 * nr_mapping_array_base =
5894 * %d\nhighest_tsn_inside_map
5896 * "highest_tsn_inside_nr_map
5897 * = %d\nTSN = %d nr_gap =
5898 * %d",asoc->mapping_array_si
5900 * asoc->nr_mapping_array_siz
5902 * asoc->mapping_array_base_t
5904 * asoc->nr_mapping_array_bas
5906 * asoc->highest_tsn_inside_m
5908 * asoc->highest_tsn_inside_n
5909 * r_map,tsn,nr_gap);
5914 tt = strmin->last_sequence_delivered + 1;
/*
 * sctp_flush_reassm_for_str_seq():
 * Purge from the association's reassembly queue every fragment that belongs
 * to the given (stream, seq) message — used when a FWD-TSN from the peer
 * tells us that message will never complete (PR-SCTP skip).
 *
 * Visible behavior (from the code below):
 *  - Walks asoc->reasmqueue front to back, saving the next pointer before
 *    any removal so the iteration survives TAILQ_REMOVE.
 *  - Skips chunks on a different stream, and skips UNORDERED chunks, whose
 *    stream sequence number is meaningless for this purge.
 *  - For an exact (stream_seq == seq) match: removes the chunk, advances
 *    asoc->tsn_last_delivered / str_of_pdapi / ssn_of_pdapi / fragment_flags
 *    when the chunk's TSN is beyond the last delivered (wrap-aware compare),
 *    decrements the reassembly-queue accounting, bumps the stream's
 *    last_sequence_delivered forward for ordered data, frees the mbuf chain
 *    and the chunk descriptor.
 *
 * NOTE(review): this extract is fragmentary — the embedded original line
 * numbers jump (5925->5927, 5948->5952, 6000->6002, ...), so opening/closing
 * braces, `continue` statements and comment delimiters have been elided by
 * the extraction. Do not treat the text below as compilable; reconcile
 * against the full sctp_indata.c before changing logic.
 */
5923 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5924 struct sctp_association *asoc,
5925 uint16_t stream, uint16_t seq)
5927 struct sctp_tmit_chunk *chk, *at;
5929 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5930 /* For each one on here see if we need to toss it */
5932 * For now large messages held on the reasmqueue that are
5933 * complete will be tossed too. We could in theory do more
5934 * work to spin through and stop after dumping one msg aka
5935 * seeing the start of a new msg at the head, and call the
5936 * delivery function... to see if it can be delivered... But
5937 * for now we just dump everything on the queue.
5939 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Save the successor before a possible TAILQ_REMOVE below. */
5941 at = TAILQ_NEXT(chk, sctp_next);
5943 * Do not toss it if on a different stream or marked
5944 * for unordered delivery in which case the stream
5945 * sequence number has no meaning.
5947 if ((chk->rec.data.stream_number != stream) ||
5948 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5952 if (chk->rec.data.stream_seq == seq) {
5953 /* It needs to be tossed */
5954 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* Wrap-aware: only move the PD-API bookkeeping forward, never back. */
5955 if (compare_with_wrap(chk->rec.data.TSN_seq,
5956 asoc->tsn_last_delivered, MAX_TSN)) {
5957 asoc->tsn_last_delivered =
5958 chk->rec.data.TSN_seq;
5959 asoc->str_of_pdapi =
5960 chk->rec.data.stream_number;
5961 asoc->ssn_of_pdapi =
5962 chk->rec.data.stream_seq;
5963 asoc->fragment_flags =
5964 chk->rec.data.rcv_flags;
5966 asoc->size_on_reasm_queue -= chk->send_size;
5967 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5969 /* Clear up any stream problem */
5970 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5971 SCTP_DATA_UNORDERED &&
5972 (compare_with_wrap(chk->rec.data.stream_seq,
5973 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5976 * We must dump forward this streams
5977 * sequence number if the chunk is
5978 * not unordered that is being
5979 * skipped. There is a chance that
5980 * if the peer does not include the
5981 * last fragment in its FWD-TSN we
5982 * WILL have a problem here since
5983 * you would have a partial chunk in
5984 * queue that may not be
5985 * deliverable. Also if a Partial
5986 * delivery API as started the user
5987 * may get a partial chunk. The next
5988 * read returning a new chunk...
5989 * really ugly but I see no way
5990 * around it! Maybe a notify??
5992 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5993 chk->rec.data.stream_seq;
/* Free payload mbufs, then return the chunk descriptor to its pool. */
5996 sctp_m_freem(chk->data);
5999 sctp_free_a_chunk(stcb, chk);
6000 } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
6002 * If the stream_seq is > than the purging
/*
 * sctp_handle_forward_tsn():
 * Receiver-side processing of a PR-SCTP FORWARD-TSN chunk (RFC 3758 steps,
 * enumerated in the author's comment below):
 *   1) advance the local cumulative TSN to the chunk's new_cumulative_tsn
 *      and update/clear the TSN bitmap(s) (mapping_array, and the
 *      nr_mapping_array when the peer supports NR-SACK);
 *   2) flush the reassembly queue of fragments at or below the new cum-TSN;
 *   3) walk the per-stream (stream, seq) pairs carried after the chunk
 *      header, aborting any in-progress partial delivery for a skipped
 *      message and kicking each affected stream's reorder queue;
 *   4) schedule a SACK via sctp_sack_check().
 *
 * Parameters (as used below): stcb/its asoc — the association;
 * fwd — the FORWARD-TSN chunk (network byte order fields);
 * abort_flag — out-flag propagated to sctp_sack_check();
 * m/offset — the mbuf chain and offset used to pull the trailing
 * sctp_strseq entries with sctp_m_getptr().
 *
 * NOTE(review): this extract is fragmentary — embedded original line numbers
 * jump (6024->6028, 6063->6066, 6116->6119, 6234->6242, ...), so `return`
 * statements, braces and several declarations (e.g. `oper`, `ippp`, `st`,
 * `str_seq`) are elided here. Reconcile against the full file before editing.
 */
6014 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
6015 struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
6018 * ISSUES that MUST be fixed for ECN! When we are the sender of the
6019 * forward TSN, when the SACK comes back that acknowledges the
6020 * FWD-TSN we must reset the NONCE sum to match correctly. This will
6021 * get quite tricky since we may have sent more data interveneing
6022 * and must carefully account for what the SACK says on the nonce
6023 * and any gaps that are reported. This work will NOT be done here,
6024 * but I note it here since it is really related to PR-SCTP and
6028 /* The pr-sctp fwd tsn */
6030 * here we will perform all the data receiver side steps for
6031 * processing FwdTSN, as required in by pr-sctp draft:
6033 * Assume we get FwdTSN(x):
6035 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
6036 * others we have 3) examine and update re-ordering queue on
6037 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
6038 * report where we are.
6040 struct sctp_association *asoc;
6041 uint32_t new_cum_tsn, gap;
6042 unsigned int i, fwd_sz, cumack_set_flag, m_size;
6044 struct sctp_stream_in *strm;
6045 struct sctp_tmit_chunk *chk, *at;
6046 struct sctp_queued_to_read *ctl, *sv;
6048 cumack_set_flag = 0;
/* Sanity: chunk must be at least the fixed FORWARD-TSN header. */
6050 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
6051 SCTPDBG(SCTP_DEBUG_INDATA1,
6052 "Bad size too small/big fwd-tsn\n");
/* m_size = mapping array capacity in bits (bytes << 3). */
6055 m_size = (stcb->asoc.mapping_array_size << 3);
6056 /*************************************************************/
6057 /* 1. Here we update local cumTSN and shift the bitmap array */
6058 /*************************************************************/
6059 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
/* Already at or past the advertised cum-TSN: nothing to do. */
6061 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
6062 asoc->cumulative_tsn == new_cum_tsn) {
6063 /* Already got there ... */
6066 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
6068 asoc->highest_tsn_inside_map = new_cum_tsn;
6069 /* EY nr_mapping_array version of the above */
6071 * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
6072 * asoc->peer_supports_nr_sack)
6074 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6075 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6076 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6080 * now we know the new TSN is more advanced, let's find the actual
6083 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
/* gap beyond the bitmap: either an attack (abort) or reset the map. */
6084 if (gap >= m_size) {
6085 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6086 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6088 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
6092 * out of range (of single byte chunks in the rwnd I
6093 * give out). This must be an attacker.
/* Build a PROTOCOL_VIOLATION error cause and abort the association. */
6096 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
6097 0, M_DONTWAIT, 1, MT_DATA);
6099 struct sctp_paramhdr *ph;
6102 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6103 (sizeof(uint32_t) * 3);
6104 ph = mtod(oper, struct sctp_paramhdr *);
6105 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6106 ph->param_length = htons(SCTP_BUF_LEN(oper));
6107 ippp = (uint32_t *) (ph + 1);
6108 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
6110 *ippp = asoc->highest_tsn_inside_map;
6112 *ippp = new_cum_tsn;
6114 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
6115 sctp_abort_an_association(stcb->sctp_ep, stcb,
6116 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* Legitimate overrun: restart the bitmap(s) at new_cum_tsn + 1. */
6119 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
6120 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
6121 cumack_set_flag = 1;
6122 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
6123 asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
6124 /* EY - nr_sack: nr_mapping_array version of the above */
6125 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
6126 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
6127 asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
6128 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6129 if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
6131 * printf("IN sctp_handle_forward_tsn:
6132 * Something is wrong the size of" "map and
6133 * nr_map should be equal!")
6137 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6138 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6140 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
/* gap fits in the bitmap: mark every TSN up to `gap` as present. */
6142 SCTP_TCB_LOCK_ASSERT(stcb);
6143 for (i = 0; i <= gap; i++) {
6144 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack
6145 && SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
6146 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
6148 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
6152 * Now after marking all, slide thing forward but no sack
6155 sctp_sack_check(stcb, 0, 0, abort_flag);
6159 /*************************************************************/
6160 /* 2. Clear up re-assembly queue */
6161 /*************************************************************/
6163 * First service it if pd-api is up, just in case we can progress it
6166 if (asoc->fragmented_delivery_inprogress) {
6167 sctp_service_reassembly(stcb, asoc);
6169 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
6170 /* For each one on here see if we need to toss it */
6172 * For now large messages held on the reasmqueue that are
6173 * complete will be tossed too. We could in theory do more
6174 * work to spin through and stop after dumping one msg aka
6175 * seeing the start of a new msg at the head, and call the
6176 * delivery function... to see if it can be delivered... But
6177 * for now we just dump everything on the queue.
6179 chk = TAILQ_FIRST(&asoc->reasmqueue);
6181 at = TAILQ_NEXT(chk, sctp_next);
/* Toss every fragment whose TSN <= new_cum_tsn (wrap-aware). */
6182 if ((compare_with_wrap(new_cum_tsn,
6183 chk->rec.data.TSN_seq, MAX_TSN)) ||
6184 (new_cum_tsn == chk->rec.data.TSN_seq)) {
6185 /* It needs to be tossed */
6186 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
6187 if (compare_with_wrap(chk->rec.data.TSN_seq,
6188 asoc->tsn_last_delivered, MAX_TSN)) {
6189 asoc->tsn_last_delivered =
6190 chk->rec.data.TSN_seq;
6191 asoc->str_of_pdapi =
6192 chk->rec.data.stream_number;
6193 asoc->ssn_of_pdapi =
6194 chk->rec.data.stream_seq;
6195 asoc->fragment_flags =
6196 chk->rec.data.rcv_flags;
6198 asoc->size_on_reasm_queue -= chk->send_size;
6199 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
6201 /* Clear up any stream problem */
6202 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
6203 SCTP_DATA_UNORDERED &&
6204 (compare_with_wrap(chk->rec.data.stream_seq,
6205 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6208 * We must dump forward this streams
6209 * sequence number if the chunk is
6210 * not unordered that is being
6211 * skipped. There is a chance that
6212 * if the peer does not include the
6213 * last fragment in its FWD-TSN we
6214 * WILL have a problem here since
6215 * you would have a partial chunk in
6216 * queue that may not be
6217 * deliverable. Also if a Partial
6218 * delivery API as started the user
6219 * may get a partial chunk. The next
6220 * read returning a new chunk...
6221 * really ugly but I see no way
6222 * around it! Maybe a notify??
6224 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6225 chk->rec.data.stream_seq;
6228 sctp_m_freem(chk->data);
6231 sctp_free_a_chunk(stcb, chk);
6234 * Ok we have gone beyond the end of the
6242 /*******************************************************/
6243 /* 3. Update the PR-stream re-ordering queues and fix */
6244 /* delivery issues as needed. */
6245 /*******************************************************/
/* fwd_sz now holds only the trailing (stream, seq) entries. */
6246 fwd_sz -= sizeof(*fwd);
6249 unsigned int num_str;
6250 struct sctp_strseq *stseq, strseqbuf;
6252 offset += sizeof(*fwd);
6254 SCTP_INP_READ_LOCK(stcb->sctp_ep);
6255 num_str = fwd_sz / sizeof(struct sctp_strseq);
6256 for (i = 0; i < num_str; i++) {
/* Pull the next strseq entry; sctp_m_getptr may copy into strseqbuf. */
6259 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
6260 sizeof(struct sctp_strseq),
6261 (uint8_t *) & strseqbuf);
6262 offset += sizeof(struct sctp_strseq);
6263 if (stseq == NULL) {
/* Convert both fields to host byte order in place. */
6267 st = ntohs(stseq->stream);
6269 st = ntohs(stseq->sequence);
6270 stseq->sequence = st;
6275 * Ok we now look for the stream/seq on the read
6276 * queue where its not all delivered. If we find it
6277 * we transmute the read entry into a PDI_ABORTED.
6279 if (stseq->stream >= asoc->streamincnt) {
6280 /* screwed up streams, stop! */
6283 if ((asoc->str_of_pdapi == stseq->stream) &&
6284 (asoc->ssn_of_pdapi == stseq->sequence)) {
6286 * If this is the one we were partially
6287 * delivering now then we no longer are.
6288 * Note this will change with the reassembly
6291 asoc->fragmented_delivery_inprogress = 0;
6293 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
6294 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
6295 if ((ctl->sinfo_stream == stseq->stream) &&
6296 (ctl->sinfo_ssn == stseq->sequence)) {
6297 str_seq = (stseq->stream << 16) | stseq->sequence;
6299 ctl->pdapi_aborted = 1;
/* Temporarily point control_pdapi at ctl so the notify sees it. */
6300 sv = stcb->asoc.control_pdapi;
6301 stcb->asoc.control_pdapi = ctl;
6302 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
6304 SCTP_PARTIAL_DELIVERY_ABORTED,
6306 SCTP_SO_NOT_LOCKED);
6307 stcb->asoc.control_pdapi = sv;
6309 } else if ((ctl->sinfo_stream == stseq->stream) &&
6310 (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
6311 /* We are past our victim SSN */
6315 strm = &asoc->strmin[stseq->stream];
6316 if (compare_with_wrap(stseq->sequence,
6317 strm->last_sequence_delivered, MAX_SEQ)) {
6318 /* Update the sequence number */
6319 strm->last_sequence_delivered =
6322 /* now kick the stream the new way */
6323 /* sa_ignore NO_NULL_CHK */
6324 sctp_kick_prsctp_reorder_queue(stcb, strm);
6326 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
6328 if (TAILQ_FIRST(&asoc->reasmqueue)) {
6329 /* now lets kick out and check for more fragmented delivery */
6330 /* sa_ignore NO_NULL_CHK */
6331 sctp_deliver_reasm_check(stcb, &stcb->asoc);
6335 /* EY fully identical to sctp_express_handle_sack, duplicated for only naming convention */
/*
 * sctp_express_handle_nr_sack():
 * Fast-path handler for an NR-SACK that carries only a new cumulative ack
 * (no gap blocks). Per the author's comment above, it mirrors
 * sctp_express_handle_sack. Work visible below, in order:
 *  - log arrival; under strict-sacks, abort the association on a cum-ack
 *    beyond anything we ever sent (PROTOCOL_VIOLATION, SCTP_LOC_25);
 *  - reset per-net CC/CUC state, then walk the sent_queue freeing every
 *    chunk newly covered by `cumack` (flight-size/total-flight decrease,
 *    RTO sampling for first transmissions, ECN nonce accumulation,
 *    revoked-chunk cwnd deflation, sack logging);
 *  - wake the sending socket (with the Apple/SO_LOCK_TESTING lock dance);
 *  - invoke the CC module's cwnd-update hook, advance last_acked_seq and
 *    advanced_peer_ack_point, run the ECN-nonce sum/resync checks;
 *  - recompute peers_rwnd from `rwnd` minus bytes in flight (SWS clamp to
 *    0), handle window-probe recovery, restart or stop T3-rxt and early-FR
 *    timers per destination, and audit flight size if the accounting looks
 *    inconsistent;
 *  - finally handle shutdown progression: user-initiated abort when a
 *    partial message is stuck at EOR, else send SHUTDOWN or SHUTDOWN-ACK
 *    as the association state dictates.
 *
 * NOTE(review): fragmentary extract — embedded line numbers jump
 * (6357->6361, 6402->6408, 6446->6448, 6544->6552, 6715->6721, ...), so
 * declarations (`old_rwnd`, `send_s`, `oper`, `so`, `to_ticks`, ...),
 * braces, `return`s and several conditions are elided. Reconcile against
 * the full file before changing logic.
 */
6337 sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
6338 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
6340 struct sctp_nets *net;
6341 struct sctp_association *asoc;
6342 struct sctp_tmit_chunk *tp1, *tp2;
6344 int win_probe_recovery = 0;
6345 int win_probe_recovered = 0;
6346 int j, done_once = 0;
6348 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
6349 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
6350 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
6352 SCTP_TCB_LOCK_ASSERT(stcb);
6353 #ifdef SCTP_ASOCLOG_OF_TSNS
6354 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
6355 stcb->asoc.cumack_log_at++;
6356 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
6357 stcb->asoc.cumack_log_at = 0;
6361 old_rwnd = asoc->peers_rwnd;
/* Old news (cumack behind last_acked_seq): elided branch. */
6362 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
6365 } else if (asoc->last_acked_seq == cumack) {
6366 /* Window update sack */
6367 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6368 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6369 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6370 /* SWS sender side engages */
6371 asoc->peers_rwnd = 0;
6373 if (asoc->peers_rwnd > old_rwnd) {
6378 /* First setup for CC stuff */
6379 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6380 net->prev_cwnd = net->cwnd;
6385 * CMT: Reset CUC and Fast recovery algo variables before
6388 net->new_pseudo_cumack = 0;
6389 net->will_exit_fast_recovery = 0;
/* strict-sacks: a cum-ack at/above our next send TSN is impossible. */
6391 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
6394 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
6395 tp1 = TAILQ_LAST(&asoc->sent_queue,
6396 sctpchunk_listhead);
6397 send_s = tp1->rec.data.TSN_seq + 1;
6399 send_s = asoc->sending_seq;
6401 if ((cumack == send_s) ||
6402 compare_with_wrap(cumack, send_s, MAX_TSN)) {
6408 panic("Impossible sack 1");
6412 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6413 0, M_DONTWAIT, 1, MT_DATA);
6415 struct sctp_paramhdr *ph;
6418 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6420 ph = mtod(oper, struct sctp_paramhdr *);
6421 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6422 ph->param_length = htons(SCTP_BUF_LEN(oper));
6423 ippp = (uint32_t *) (ph + 1);
6424 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
6426 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
6427 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6432 asoc->this_sack_highest_gap = cumack;
6433 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6434 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6435 stcb->asoc.overall_error_count,
6437 SCTP_FROM_SCTP_INDATA,
/* Any valid SACK arrival clears the association error counter. */
6440 stcb->asoc.overall_error_count = 0;
6441 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
6442 /* process the new consecutive TSN first */
6443 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6445 tp2 = TAILQ_NEXT(tp1, sctp_next);
6446 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
6448 cumack == tp1->rec.data.TSN_seq) {
6449 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
6450 printf("Warning, an unsent is now acked?\n");
6453 * ECN Nonce: Add the nonce to the sender's
6456 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
6457 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
6459 * If it is less than ACKED, it is
6460 * now no-longer in flight. Higher
6461 * values may occur during marking
6463 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6464 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6465 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
6466 tp1->whoTo->flight_size,
6468 (uintptr_t) tp1->whoTo,
6469 tp1->rec.data.TSN_seq);
6471 sctp_flight_size_decrease(tp1);
6472 /* sa_ignore NO_NULL_CHK */
6473 sctp_total_flight_decrease(stcb, tp1);
6475 tp1->whoTo->net_ack += tp1->send_size;
/* snd_count < 2: first transmission, safe for an RTT sample. */
6476 if (tp1->snd_count < 2) {
6478 * True non-retransmited
6481 tp1->whoTo->net_ack2 +=
6484 /* update RTO too? */
6491 sctp_calculate_rto(stcb,
6493 &tp1->sent_rcv_time,
6494 sctp_align_safe_nocopy);
6499 * CMT: CUCv2 algorithm. From the
6500 * cumack'd TSNs, for each TSN being
6501 * acked for the first time, set the
6502 * following variables for the
6503 * corresp destination.
6504 * new_pseudo_cumack will trigger a
6506 * find_(rtx_)pseudo_cumack will
6507 * trigger search for the next
6508 * expected (rtx-)pseudo-cumack.
6510 tp1->whoTo->new_pseudo_cumack = 1;
6511 tp1->whoTo->find_pseudo_cumack = 1;
6512 tp1->whoTo->find_rtx_pseudo_cumack = 1;
6514 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6515 /* sa_ignore NO_NULL_CHK */
6516 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6519 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6520 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6522 if (tp1->rec.data.chunk_was_revoked) {
6523 /* deflate the cwnd */
6524 tp1->whoTo->cwnd -= tp1->book_size;
6525 tp1->rec.data.chunk_was_revoked = 0;
6527 tp1->sent = SCTP_DATAGRAM_ACKED;
6528 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
6530 /* sa_ignore NO_NULL_CHK */
6531 sctp_free_bufspace(stcb, asoc, tp1, 1);
6532 sctp_m_freem(tp1->data);
6534 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6535 sctp_log_sack(asoc->last_acked_seq,
6537 tp1->rec.data.TSN_seq,
6540 SCTP_LOG_FREE_SENT);
6543 asoc->sent_queue_cnt--;
6544 sctp_free_a_chunk(stcb, tp1);
6552 /* sa_ignore NO_NULL_CHK */
6553 if (stcb->sctp_socket) {
6554 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6559 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
6560 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6561 /* sa_ignore NO_NULL_CHK */
6562 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
6564 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock order: drop TCB lock, take socket lock, retake TCB lock;
 * refcnt hold keeps the assoc alive across the unlocked window. */
6565 so = SCTP_INP_SO(stcb->sctp_ep);
6566 atomic_add_int(&stcb->asoc.refcnt, 1);
6567 SCTP_TCB_UNLOCK(stcb);
6568 SCTP_SOCKET_LOCK(so, 1);
6569 SCTP_TCB_LOCK(stcb);
6570 atomic_subtract_int(&stcb->asoc.refcnt, 1);
6571 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6572 /* assoc was freed while we were unlocked */
6573 SCTP_SOCKET_UNLOCK(so, 1);
6577 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
6578 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6579 SCTP_SOCKET_UNLOCK(so, 1);
6582 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6583 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
6587 /* JRS - Use the congestion control given in the CC module */
6588 if (asoc->last_acked_seq != cumack)
6589 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
6591 asoc->last_acked_seq = cumack;
6593 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6594 /* nothing left in-flight */
6595 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6596 net->flight_size = 0;
6597 net->partial_bytes_acked = 0;
6599 asoc->total_flight = 0;
6600 asoc->total_flight_count = 0;
6602 /* Fix up the a-p-a-p for future PR-SCTP sends */
6603 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
6604 asoc->advanced_peer_ack_point = cumack;
6606 /* ECN Nonce updates */
6607 if (asoc->ecn_nonce_allowed) {
6608 if (asoc->nonce_sum_check) {
6609 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
6610 if (asoc->nonce_wait_for_ecne == 0) {
6611 struct sctp_tmit_chunk *lchk;
6613 lchk = TAILQ_FIRST(&asoc->send_queue);
6614 asoc->nonce_wait_for_ecne = 1;
6616 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
6618 asoc->nonce_wait_tsn = asoc->sending_seq;
6621 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
6622 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
6624 * Misbehaving peer. We need
6625 * to react to this guy
6627 asoc->ecn_allowed = 0;
6628 asoc->ecn_nonce_allowed = 0;
6633 /* See if Resynchronization Possible */
6634 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
6635 asoc->nonce_sum_check = 1;
6637 * now we must calculate what the base is.
6638 * We do this based on two things, we know
6639 * the total's for all the segments
6640 * gap-acked in the SACK (none), We also
6641 * know the SACK's nonce sum, its in
6642 * nonce_sum_flag. So we can build a truth
6643 * table to back-calculate the new value of
6644 * asoc->nonce_sum_expect_base:
6646 * SACK-flag-Value Seg-Sums Base 0 0 0
6649 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute the peer's usable window from the advertised rwnd. */
6654 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6655 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6656 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6657 /* SWS sender side engages */
6658 asoc->peers_rwnd = 0;
6660 if (asoc->peers_rwnd > old_rwnd) {
6661 win_probe_recovery = 1;
6663 /* Now assure a timer where data is queued at */
6666 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6669 if (win_probe_recovery && (net->window_probe)) {
6670 win_probe_recovered = 1;
6672 * Find first chunk that was used with window probe
6673 * and clear the sent
6675 /* sa_ignore FREED_MEMORY */
6676 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6677 if (tp1->window_probe) {
6678 /* move back to data send queue */
6679 sctp_window_probe_recovery(stcb, asoc, net, tp1);
6684 if (net->RTO == 0) {
6685 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
6687 to_ticks = MSEC_TO_TICKS(net->RTO);
6689 if (net->flight_size) {
6692 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6693 sctp_timeout_handler, &net->rxt_timer);
6694 if (net->window_probe) {
6695 net->window_probe = 0;
6698 if (net->window_probe) {
6700 * In window probes we must assure a timer
6701 * is still running there
6703 net->window_probe = 0;
6704 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6705 sctp_timeout_handler, &net->rxt_timer);
6706 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6707 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
6709 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
6711 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
6712 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6713 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
6714 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
6715 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight-size audit: condition head elided by the extraction. */
6721 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
6722 (asoc->sent_queue_retran_cnt == 0) &&
6723 (win_probe_recovered == 0) &&
6726 * huh, this should not happen unless all packets are
6727 * PR-SCTP and marked to skip of course.
6729 if (sctp_fs_audit(asoc)) {
6730 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6731 net->flight_size = 0;
6733 asoc->total_flight = 0;
6734 asoc->total_flight_count = 0;
6735 asoc->sent_queue_retran_cnt = 0;
6736 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6737 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6738 sctp_flight_size_increase(tp1);
6739 sctp_total_flight_increase(stcb, tp1);
6740 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6741 asoc->sent_queue_retran_cnt++;
6748 /**********************************/
6749 /* Now what about shutdown issues */
6750 /**********************************/
6751 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
6752 /* nothing left on sendqueue.. consider done */
6754 if ((asoc->stream_queue_cnt == 1) &&
6755 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6756 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
6757 (asoc->locked_on_sending)
6759 struct sctp_stream_queue_pending *sp;
6762 * I may be in a state where we got all across.. but
6763 * cannot write more due to a shutdown... we abort
6764 * since the user did not indicate EOR in this case.
6765 * The sp will be cleaned during free of the asoc.
6767 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
6769 if ((sp) && (sp->length == 0)) {
6770 /* Let cleanup code purge it */
6771 if (sp->msg_is_complete) {
6772 asoc->stream_queue_cnt--;
6774 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6775 asoc->locked_on_sending = NULL;
6776 asoc->stream_queue_cnt--;
6780 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
6781 (asoc->stream_queue_cnt == 0)) {
6782 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6783 /* Need to abort here */
/* User-initiated abort: partial message left with shutdown pending. */
6789 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6790 0, M_DONTWAIT, 1, MT_DATA);
6792 struct sctp_paramhdr *ph;
6795 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6797 ph = mtod(oper, struct sctp_paramhdr *);
6798 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6799 ph->param_length = htons(SCTP_BUF_LEN(oper));
6800 ippp = (uint32_t *) (ph + 1);
6801 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
6803 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
6804 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
6806 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
6807 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
6808 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6810 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6811 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6812 sctp_stop_timers_for_shutdown(stcb);
6813 sctp_send_shutdown(stcb,
6814 stcb->asoc.primary_destination);
6815 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
6816 stcb->sctp_ep, stcb, asoc->primary_destination);
6817 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
6818 stcb->sctp_ep, stcb, asoc->primary_destination);
6820 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
6821 (asoc->stream_queue_cnt == 0)) {
6822 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6825 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6826 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
6827 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6828 sctp_send_shutdown_ack(stcb,
6829 stcb->asoc.primary_destination);
6831 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
6832 stcb->sctp_ep, stcb, asoc->primary_destination);
6835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
6836 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
6838 stcb->asoc.peers_rwnd,
6839 stcb->asoc.total_flight,
6840 stcb->asoc.total_output_queue_size);
6844 /* EY! nr_sack version of sctp_handle_segments, nr-gapped TSNs get removed from RtxQ in this method*/
6846 sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
6847 struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
6848 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
6849 uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
6851 /************************************************/
6852 /* process fragments and update sendqueue */
6853 /************************************************/
6854 struct sctp_nr_sack *nr_sack;
6855 struct sctp_gap_ack_block *frag, block;
6856 struct sctp_nr_gap_ack_block *nr_frag, nr_block;
6857 struct sctp_tmit_chunk *tp1;
6863 uint16_t frag_strt, frag_end, primary_flag_set;
6864 uint16_t nr_frag_strt, nr_frag_end;
6866 uint32_t last_frag_high;
6867 uint32_t last_nr_frag_high;
6870 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
6872 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
6873 primary_flag_set = 1;
6875 primary_flag_set = 0;
6877 nr_sack = &ch->nr_sack;
6880 * EY! - I will process nr_gaps similarly,by going to this position
6881 * again if All bit is set
6883 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6884 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6885 *offset += sizeof(block);
6891 for (i = 0; i < num_seg; i++) {
6892 frag_strt = ntohs(frag->start);
6893 frag_end = ntohs(frag->end);
6894 /* some sanity checks on the fargment offsets */
6895 if (frag_strt > frag_end) {
6896 /* this one is malformed, skip */
6900 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
6902 *biggest_tsn_acked = frag_end + last_tsn;
6904 /* mark acked dgs and find out the highestTSN being acked */
6906 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6908 /* save the locations of the last frags */
6909 last_frag_high = frag_end + last_tsn;
6912 * now lets see if we need to reset the queue due to
6913 * a out-of-order SACK fragment
6915 if (compare_with_wrap(frag_strt + last_tsn,
6916 last_frag_high, MAX_TSN)) {
6918 * if the new frag starts after the last TSN
6919 * frag covered, we are ok and this one is
6920 * beyond the last one
6925 * ok, they have reset us, so we need to
6926 * reset the queue this will cause extra
6927 * hunting but hey, they chose the
6928 * performance hit when they failed to order
6931 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6933 last_frag_high = frag_end + last_tsn;
6935 for (j = frag_strt; j <= frag_end; j++) {
6936 theTSN = j + last_tsn;
6938 if (tp1->rec.data.doing_fast_retransmit)
6942 * CMT: CUCv2 algorithm. For each TSN being
6943 * processed from the sent queue, track the
6944 * next expected pseudo-cumack, or
6945 * rtx_pseudo_cumack, if required. Separate
6946 * cumack trackers for first transmissions,
6947 * and retransmissions.
6949 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6950 (tp1->snd_count == 1)) {
6951 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
6952 tp1->whoTo->find_pseudo_cumack = 0;
6954 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6955 (tp1->snd_count > 1)) {
6956 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
6957 tp1->whoTo->find_rtx_pseudo_cumack = 0;
6959 if (tp1->rec.data.TSN_seq == theTSN) {
6960 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
6962 * must be held until
6966 * ECN Nonce: Add the nonce
6967 * value to the sender's
6970 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6972 * If it is less than RESEND, it is
6973 * now no-longer in flight.
6974 * Higher values may already be set
6975 * via previous Gap Ack Blocks...
6976 * i.e. ACKED or RESEND.
6978 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6979 *biggest_newly_acked_tsn, MAX_TSN)) {
6980 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
6989 * this_sack_highest_
6993 if (tp1->rec.data.chunk_was_revoked == 0)
6994 tp1->whoTo->saw_newack = 1;
6996 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6997 tp1->whoTo->this_sack_highest_newack,
6999 tp1->whoTo->this_sack_highest_newack =
7000 tp1->rec.data.TSN_seq;
7005 * this_sack_lowest_n
7008 if (*this_sack_lowest_newack == 0) {
7009 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7010 sctp_log_sack(*this_sack_lowest_newack,
7012 tp1->rec.data.TSN_seq,
7015 SCTP_LOG_TSN_ACKED);
7017 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7022 * (rtx-)pseudo-cumac
7027 * (rtx-)pseudo-cumac
7029 * new_(rtx_)pseudo_c
7037 * (rtx-)pseudo-cumac
7045 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
7046 if (tp1->rec.data.chunk_was_revoked == 0) {
7047 tp1->whoTo->new_pseudo_cumack = 1;
7049 tp1->whoTo->find_pseudo_cumack = 1;
7051 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7052 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7054 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
7055 if (tp1->rec.data.chunk_was_revoked == 0) {
7056 tp1->whoTo->new_pseudo_cumack = 1;
7058 tp1->whoTo->find_rtx_pseudo_cumack = 1;
7060 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7061 sctp_log_sack(*biggest_newly_acked_tsn,
7063 tp1->rec.data.TSN_seq,
7066 SCTP_LOG_TSN_ACKED);
7068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7069 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
7070 tp1->whoTo->flight_size,
7072 (uintptr_t) tp1->whoTo,
7073 tp1->rec.data.TSN_seq);
7075 sctp_flight_size_decrease(tp1);
7076 sctp_total_flight_decrease(stcb, tp1);
7078 tp1->whoTo->net_ack += tp1->send_size;
7079 if (tp1->snd_count < 2) {
7086 tp1->whoTo->net_ack2 += tp1->send_size;
7094 sctp_calculate_rto(stcb,
7097 &tp1->sent_rcv_time,
7098 sctp_align_safe_nocopy);
7103 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
7104 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
7105 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
7106 if (compare_with_wrap(tp1->rec.data.TSN_seq,
7107 asoc->this_sack_highest_gap,
7109 asoc->this_sack_highest_gap =
7110 tp1->rec.data.TSN_seq;
7112 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7113 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7114 #ifdef SCTP_AUDITING_ENABLED
7115 sctp_audit_log(0xB2,
7116 (asoc->sent_queue_retran_cnt & 0x000000ff));
7121 * All chunks NOT UNSENT
7122 * fall through here and are
7125 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7126 tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7127 if (tp1->rec.data.chunk_was_revoked) {
7128 /* deflate the cwnd */
7129 tp1->whoTo->cwnd -= tp1->book_size;
7130 tp1->rec.data.chunk_was_revoked = 0;
7134 } /* if (tp1->TSN_seq == theTSN) */
7135 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
7139 tp1 = TAILQ_NEXT(tp1, sctp_next);
7140 } /* end while (tp1) */
7141 } /* end for (j = fragStart */
7142 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
7143 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
7144 *offset += sizeof(block);
7150 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
7152 sctp_log_fr(*biggest_tsn_acked,
7153 *biggest_newly_acked_tsn,
7154 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
7156 nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7157 sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7158 *offset += sizeof(nr_block);
7162 if (nr_frag == NULL) {
7166 last_nr_frag_high = 0;
7168 for (i = 0; i < num_nr_seg; i++) {
7170 nr_frag_strt = ntohs(nr_frag->start);
7171 nr_frag_end = ntohs(nr_frag->end);
7173 /* some sanity checks on the nr fargment offsets */
7174 if (nr_frag_strt > nr_frag_end) {
7175 /* this one is malformed, skip */
7179 /* mark acked dgs and find out the highestTSN being acked */
7181 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7183 /* save the locations of the last frags */
7184 last_nr_frag_high = nr_frag_end + last_tsn;
7187 * now lets see if we need to reset the queue due to
7188 * a out-of-order SACK fragment
7190 if (compare_with_wrap(nr_frag_strt + last_tsn,
7191 last_nr_frag_high, MAX_TSN)) {
7193 * if the new frag starts after the last TSN
7194 * frag covered, we are ok and this one is
7195 * beyond the last one
7200 * ok, they have reset us, so we need to
7201 * reset the queue this will cause extra
7202 * hunting but hey, they chose the
7203 * performance hit when they failed to order
7206 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7208 last_nr_frag_high = nr_frag_end + last_tsn;
7211 for (j = nr_frag_strt + last_tsn; (compare_with_wrap((nr_frag_end + last_tsn), j, MAX_TSN)); j++) {
7213 if (tp1->rec.data.TSN_seq == j) {
7214 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7215 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7216 tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7218 * TAILQ_REMOVE(&asoc->sent_q
7219 * ueue, tp1, sctp_next);
7226 sctp_free_bufspace(stcb, asoc, tp1, 1);
7227 sctp_m_freem(tp1->data);
7230 /* asoc->sent_queue_cnt--; */
7232 * sctp_free_a_chunk(stcb,
7238 } /* if (tp1->TSN_seq == j) */
7239 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
7242 tp1 = TAILQ_NEXT(tp1, sctp_next);
7243 } /* end while (tp1) */
7245 } /* end for (j = nrFragStart */
7247 nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7248 sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7249 *offset += sizeof(nr_block);
7250 if (nr_frag == NULL) {
7256 * EY- wake up the socket if things have been removed from the sent
7259 if ((wake_him) && (stcb->sctp_socket)) {
7260 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7264 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7266 * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
7267 * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
7268 * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
7270 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7271 so = SCTP_INP_SO(stcb->sctp_ep);
7272 atomic_add_int(&stcb->asoc.refcnt, 1);
7273 SCTP_TCB_UNLOCK(stcb);
7274 SCTP_SOCKET_LOCK(so, 1);
7275 SCTP_TCB_LOCK(stcb);
7276 atomic_subtract_int(&stcb->asoc.refcnt, 1);
7277 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7278 /* assoc was freed while we were unlocked */
7279 SCTP_SOCKET_UNLOCK(so, 1);
7283 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7284 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7285 SCTP_SOCKET_UNLOCK(so, 1);
7288 * (SCTP_BASE_SYSCTL(sctp_logging_level) &
7289 * SCTP_WAKE_LOGGING_ENABLE) {
7290 * sctp_wakeup_log(stcb, cum_ack, wake_him,
7291 * SCTP_NOWAKE_FROM_SACK); } } */
7295 /* Identifies the non-renegable tsns that are revoked*/
/*
 * NOTE(review): this excerpt is missing interior lines of the function
 * (the return type line, the loop header that drives the TAILQ walk,
 * and the closing braces).  The comments below describe only the
 * statements that are visible here; confirm against the full source.
 */
7297 sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
7298 struct sctp_association *asoc, uint32_t cumack,
7299 u_long biggest_tsn_acked)
7301 struct sctp_tmit_chunk *tp1;
/* start the scan at the head of the association's sent queue */
7303 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/*
 * Only chunks with a TSN strictly above the cumulative ack point can
 * have been gap-acked earlier and therefore be subject to revocation.
 */
7305 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
7308 * ok this guy is either ACK or MARKED. If it is
7309 * ACKED it has been previously acked but not this
7310 * time i.e. revoked. If it is MARKED it was ACK'ed
/*
 * Chunks beyond the highest TSN acknowledged by this SACK were not
 * covered by it at all, so they cannot be judged revoked here.
 */
7313 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
/*
 * NR_ACKED means the peer previously acknowledged this TSN in a
 * non-renegable gap block; seeing it unacked now is a protocol
 * violation (non-renegable TSNs must never be revoked).
 */
7318 if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
7320 * EY! a non-renegable TSN is revoked, need
7321 * to abort the association
7324 * EY TODO: put in the code to abort the
/* re-acknowledged in this SACK: promote from MARKED to ACKED */
7328 } else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
7329 /* it has been re-acked in this SACK */
7330 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/*
 * UNSENT entries mark the start of data never put on the wire;
 * nothing past this point can have been acked, so the walk can stop.
 */
7333 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
/* advance to the next chunk on the sent queue */
7335 tp1 = TAILQ_NEXT(tp1, sctp_next);
7339 /* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method*/
7341 sctp_handle_nr_sack(struct mbuf *m, int offset,
7342 struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
7343 struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
7345 struct sctp_association *asoc;
7348 struct sctp_nr_sack *nr_sack;
7349 struct sctp_tmit_chunk *tp1, *tp2;
7350 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
7351 this_sack_lowest_newack;
7352 uint32_t sav_cum_ack;
7355 uint16_t num_seg, num_nr_seg, num_dup;
7356 uint16_t wake_him = 0;
7357 unsigned int nr_sack_length;
7358 uint32_t send_s = 0;
7360 int accum_moved = 0;
7361 int will_exit_fast_recovery = 0;
7362 uint32_t a_rwnd, old_rwnd;
7363 int win_probe_recovery = 0;
7364 int win_probe_recovered = 0;
7365 struct sctp_nets *net = NULL;
7366 int nonce_sum_flag, ecn_seg_sums = 0;
7368 uint8_t reneged_all = 0;
7369 uint8_t cmt_dac_flag;
7372 * we take any chance we can to service our queues since we cannot
7373 * get awoken when the socket is read from :<
7376 * Now perform the actual SACK handling: 1) Verify that it is not an
7377 * old sack, if so discard. 2) If there is nothing left in the send
7378 * queue (cum-ack is equal to last acked) then you have a duplicate
7379 * too, update any rwnd change and verify no timers are running.
7380 * then return. 3) Process any new consequtive data i.e. cum-ack
7381 * moved process these first and note that it moved. 4) Process any
7382 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
7383 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
7384 * sync up flightsizes and things, stop all timers and also check
7385 * for shutdown_pending state. If so then go ahead and send off the
7386 * shutdown. If in shutdown recv, send off the shutdown-ack and
7387 * start that timer, Ret. 9) Strike any non-acked things and do FR
7388 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
7389 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
7390 * if in shutdown_recv state.
7392 SCTP_TCB_LOCK_ASSERT(stcb);
7393 nr_sack = &ch->nr_sack;
7395 this_sack_lowest_newack = 0;
7397 nr_sack_length = (unsigned int)nr_sack_len;
7399 SCTP_STAT_INCR(sctps_slowpath_sack);
7400 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
7401 cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
7402 #ifdef SCTP_ASOCLOG_OF_TSNS
7403 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
7404 stcb->asoc.cumack_log_at++;
7405 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
7406 stcb->asoc.cumack_log_at = 0;
7409 num_seg = ntohs(nr_sack->num_gap_ack_blks);
7410 num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
7413 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
7414 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
7415 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
7418 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
7419 num_dup = ntohs(nr_sack->num_dup_tsns);
7421 old_rwnd = stcb->asoc.peers_rwnd;
7422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
7423 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
7424 stcb->asoc.overall_error_count,
7426 SCTP_FROM_SCTP_INDATA,
7429 stcb->asoc.overall_error_count = 0;
7431 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7432 sctp_log_sack(asoc->last_acked_seq,
7439 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
7440 int off_to_dup, iii;
7441 uint32_t *dupdata, dblock;
7443 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
7444 (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
7445 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
7446 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7447 sizeof(uint32_t), (uint8_t *) & dblock);
7448 off_to_dup += sizeof(uint32_t);
7450 for (iii = 0; iii < num_dup; iii++) {
7451 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
7452 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7453 sizeof(uint32_t), (uint8_t *) & dblock);
7454 if (dupdata == NULL)
7456 off_to_dup += sizeof(uint32_t);
7460 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
7461 off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
7464 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7466 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
7467 tp1 = TAILQ_LAST(&asoc->sent_queue,
7468 sctpchunk_listhead);
7469 send_s = tp1->rec.data.TSN_seq + 1;
7471 send_s = asoc->sending_seq;
7473 if (cum_ack == send_s ||
7474 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
7481 panic("Impossible sack 1");
7486 * no way, we have not even sent this TSN out yet.
7487 * Peer is hopelessly messed up with us.
7492 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7493 0, M_DONTWAIT, 1, MT_DATA);
7495 struct sctp_paramhdr *ph;
7498 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7500 ph = mtod(oper, struct sctp_paramhdr *);
7501 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
7502 ph->param_length = htons(SCTP_BUF_LEN(oper));
7503 ippp = (uint32_t *) (ph + 1);
7504 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
7506 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
7507 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
7512 /**********************/
7513 /* 1) check the range */
7514 /**********************/
7515 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
7516 /* acking something behind */
7519 sav_cum_ack = asoc->last_acked_seq;
7521 /* update the Rwnd of the peer */
7522 if (TAILQ_EMPTY(&asoc->sent_queue) &&
7523 TAILQ_EMPTY(&asoc->send_queue) &&
7524 (asoc->stream_queue_cnt == 0)
7526 /* nothing left on send/sent and strmq */
7527 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7528 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7529 asoc->peers_rwnd, 0, 0, a_rwnd);
7531 asoc->peers_rwnd = a_rwnd;
7532 if (asoc->sent_queue_retran_cnt) {
7533 asoc->sent_queue_retran_cnt = 0;
7535 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7536 /* SWS sender side engages */
7537 asoc->peers_rwnd = 0;
7539 /* stop any timers */
7540 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7541 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7542 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7543 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7544 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7545 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
7546 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7547 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7550 net->partial_bytes_acked = 0;
7551 net->flight_size = 0;
7553 asoc->total_flight = 0;
7554 asoc->total_flight_count = 0;
7558 * We init netAckSz and netAckSz2 to 0. These are used to track 2
7559 * things. The total byte count acked is tracked in netAckSz AND
7560 * netAck2 is used to track the total bytes acked that are un-
7561 * amibguious and were never retransmitted. We track these on a per
7562 * destination address basis.
7564 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7565 net->prev_cwnd = net->cwnd;
7570 * CMT: Reset CUC and Fast recovery algo variables before
7573 net->new_pseudo_cumack = 0;
7574 net->will_exit_fast_recovery = 0;
7576 /* process the new consecutive TSN first */
7577 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7579 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
7581 last_tsn == tp1->rec.data.TSN_seq) {
7582 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7584 * ECN Nonce: Add the nonce to the sender's
7587 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
7589 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
7591 * If it is less than ACKED, it is
7592 * now no-longer in flight. Higher
7593 * values may occur during marking
7595 if ((tp1->whoTo->dest_state &
7596 SCTP_ADDR_UNCONFIRMED) &&
7597 (tp1->snd_count < 2)) {
7599 * If there was no retran
7600 * and the address is
7601 * un-confirmed and we sent
7603 * sacked.. its confirmed,
7606 tp1->whoTo->dest_state &=
7607 ~SCTP_ADDR_UNCONFIRMED;
7609 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
7610 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7611 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
7612 tp1->whoTo->flight_size,
7614 (uintptr_t) tp1->whoTo,
7615 tp1->rec.data.TSN_seq);
7617 sctp_flight_size_decrease(tp1);
7618 sctp_total_flight_decrease(stcb, tp1);
7620 tp1->whoTo->net_ack += tp1->send_size;
7622 /* CMT SFR and DAC algos */
7623 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7624 tp1->whoTo->saw_newack = 1;
7626 if (tp1->snd_count < 2) {
7628 * True non-retransmited
7631 tp1->whoTo->net_ack2 +=
7634 /* update RTO too? */
7637 sctp_calculate_rto(stcb,
7639 &tp1->sent_rcv_time,
7640 sctp_align_safe_nocopy);
7645 * CMT: CUCv2 algorithm. From the
7646 * cumack'd TSNs, for each TSN being
7647 * acked for the first time, set the
7648 * following variables for the
7649 * corresp destination.
7650 * new_pseudo_cumack will trigger a
7652 * find_(rtx_)pseudo_cumack will
7653 * trigger search for the next
7654 * expected (rtx-)pseudo-cumack.
7656 tp1->whoTo->new_pseudo_cumack = 1;
7657 tp1->whoTo->find_pseudo_cumack = 1;
7658 tp1->whoTo->find_rtx_pseudo_cumack = 1;
7661 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7662 sctp_log_sack(asoc->last_acked_seq,
7664 tp1->rec.data.TSN_seq,
7667 SCTP_LOG_TSN_ACKED);
7669 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7670 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7673 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7674 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7675 #ifdef SCTP_AUDITING_ENABLED
7676 sctp_audit_log(0xB3,
7677 (asoc->sent_queue_retran_cnt & 0x000000ff));
7680 if (tp1->rec.data.chunk_was_revoked) {
7681 /* deflate the cwnd */
7682 tp1->whoTo->cwnd -= tp1->book_size;
7683 tp1->rec.data.chunk_was_revoked = 0;
7685 tp1->sent = SCTP_DATAGRAM_ACKED;
7690 tp1 = TAILQ_NEXT(tp1, sctp_next);
7692 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
7693 /* always set this up to cum-ack */
7694 asoc->this_sack_highest_gap = last_tsn;
7696 /* Move offset up to point to gaps/dups */
7697 offset += sizeof(struct sctp_nr_sack_chunk);
7698 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
7700 /* skip corrupt segments */
7706 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
7707 * to be greater than the cumack. Also reset saw_newack to 0
7710 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7711 net->saw_newack = 0;
7712 net->this_sack_highest_newack = last_tsn;
7716 * thisSackHighestGap will increase while handling NEW
7717 * segments this_sack_highest_newack will increase while
7718 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
7719 * used for CMT DAC algo. saw_newack will also change.
7722 sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
7723 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
7724 num_seg, num_nr_seg, &ecn_seg_sums);
7727 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7729 * validate the biggest_tsn_acked in the gap acks if
7730 * strict adherence is wanted.
7732 if ((biggest_tsn_acked == send_s) ||
7733 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
7735 * peer is either confused or we are under
7736 * attack. We must abort.
7743 /*******************************************/
7744 /* cancel ALL T3-send timer if accum moved */
7745 /*******************************************/
7746 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7747 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7748 if (net->new_pseudo_cumack)
7749 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7751 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
7756 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7757 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7758 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
7762 /********************************************/
7763 /* drop the acked chunks from the sendqueue */
7764 /********************************************/
7765 asoc->last_acked_seq = cum_ack;
7767 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7771 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
7775 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
7776 /* no more sent on list */
7777 printf("Warning, tp1->sent == %d and its now acked?\n",
7780 tp2 = TAILQ_NEXT(tp1, sctp_next);
7781 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
7782 if (tp1->pr_sctp_on) {
7783 if (asoc->pr_sctp_cnt != 0)
7784 asoc->pr_sctp_cnt--;
7786 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
7787 (asoc->total_flight > 0)) {
7789 panic("Warning flight size is postive and should be 0");
7791 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
7792 asoc->total_flight);
7794 asoc->total_flight = 0;
7797 /* sa_ignore NO_NULL_CHK */
7798 sctp_free_bufspace(stcb, asoc, tp1, 1);
7799 sctp_m_freem(tp1->data);
7800 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
7801 asoc->sent_queue_cnt_removeable--;
7804 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7805 sctp_log_sack(asoc->last_acked_seq,
7807 tp1->rec.data.TSN_seq,
7810 SCTP_LOG_FREE_SENT);
7813 asoc->sent_queue_cnt--;
7814 sctp_free_a_chunk(stcb, tp1);
7817 } while (tp1 != NULL);
7820 /* sa_ignore NO_NULL_CHK */
7821 if ((wake_him) && (stcb->sctp_socket)) {
7822 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7826 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7827 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7828 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7830 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7831 so = SCTP_INP_SO(stcb->sctp_ep);
7832 atomic_add_int(&stcb->asoc.refcnt, 1);
7833 SCTP_TCB_UNLOCK(stcb);
7834 SCTP_SOCKET_LOCK(so, 1);
7835 SCTP_TCB_LOCK(stcb);
7836 atomic_subtract_int(&stcb->asoc.refcnt, 1);
7837 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7838 /* assoc was freed while we were unlocked */
7839 SCTP_SOCKET_UNLOCK(so, 1);
7843 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7844 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7845 SCTP_SOCKET_UNLOCK(so, 1);
7848 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7849 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
7853 if (asoc->fast_retran_loss_recovery && accum_moved) {
7854 if (compare_with_wrap(asoc->last_acked_seq,
7855 asoc->fast_recovery_tsn, MAX_TSN) ||
7856 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
7857 /* Setup so we will exit RFC2582 fast recovery */
7858 will_exit_fast_recovery = 1;
7862 * Check for revoked fragments:
7864 * if Previous sack - Had no frags then we can't have any revoked if
7865 * Previous sack - Had frag's then - If we now have frags aka
7866 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
7867 * some of them. else - The peer revoked all ACKED fragments, since
7868 * we had some before and now we have NONE.
7872 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7874 else if (asoc->saw_sack_with_frags) {
7875 int cnt_revoked = 0;
7877 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7879 /* Peer revoked all dg's marked or acked */
7880 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7882 * EY- maybe check only if it is nr_acked
7883 * nr_marked may not be possible
7885 if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
7886 (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
7888 * EY! - TODO: Something previously
7889 * nr_gapped is reneged, abort the
7894 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
7895 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
7896 tp1->sent = SCTP_DATAGRAM_SENT;
7897 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7898 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
7899 tp1->whoTo->flight_size,
7901 (uintptr_t) tp1->whoTo,
7902 tp1->rec.data.TSN_seq);
7904 sctp_flight_size_increase(tp1);
7905 sctp_total_flight_increase(stcb, tp1);
7906 tp1->rec.data.chunk_was_revoked = 1;
7908 * To ensure that this increase in
7909 * flightsize, which is artificial,
7910 * does not throttle the sender, we
7911 * also increase the cwnd
7914 tp1->whoTo->cwnd += tp1->book_size;
7922 asoc->saw_sack_with_frags = 0;
7925 asoc->saw_sack_with_frags = 1;
7927 asoc->saw_sack_with_frags = 0;
7929 /* EY! - not sure about if there should be an IF */
7931 sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7932 else if (asoc->saw_sack_with_nr_frags) {
7934 * EY!- TODO: all previously nr_gapped chunks have been
7935 * reneged abort the association
7937 asoc->saw_sack_with_nr_frags = 0;
7940 asoc->saw_sack_with_nr_frags = 1;
7942 asoc->saw_sack_with_nr_frags = 0;
7943 /* JRS - Use the congestion control given in the CC module */
7944 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
7946 if (TAILQ_EMPTY(&asoc->sent_queue)) {
7947 /* nothing left in-flight */
7948 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7949 /* stop all timers */
7950 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7951 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7952 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
7953 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7954 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
7957 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7958 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
7959 net->flight_size = 0;
7960 net->partial_bytes_acked = 0;
7962 asoc->total_flight = 0;
7963 asoc->total_flight_count = 0;
7965 /**********************************/
7966 /* Now what about shutdown issues */
7967 /**********************************/
7968 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
7969 /* nothing left on sendqueue.. consider done */
7970 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7971 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7972 asoc->peers_rwnd, 0, 0, a_rwnd);
7974 asoc->peers_rwnd = a_rwnd;
7975 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7976 /* SWS sender side engages */
7977 asoc->peers_rwnd = 0;
7980 if ((asoc->stream_queue_cnt == 1) &&
7981 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7982 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
7983 (asoc->locked_on_sending)
7985 struct sctp_stream_queue_pending *sp;
7988 * I may be in a state where we got all across.. but
7989 * cannot write more due to a shutdown... we abort
7990 * since the user did not indicate EOR in this case.
7992 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
7994 if ((sp) && (sp->length == 0)) {
7995 asoc->locked_on_sending = NULL;
7996 if (sp->msg_is_complete) {
7997 asoc->stream_queue_cnt--;
7999 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
8000 asoc->stream_queue_cnt--;
8004 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
8005 (asoc->stream_queue_cnt == 0)) {
8006 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
8007 /* Need to abort here */
8013 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
8014 0, M_DONTWAIT, 1, MT_DATA);
8016 struct sctp_paramhdr *ph;
8019 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
8021 ph = mtod(oper, struct sctp_paramhdr *);
8022 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
8023 ph->param_length = htons(SCTP_BUF_LEN(oper));
8024 ippp = (uint32_t *) (ph + 1);
8025 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
8027 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
8028 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
8031 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
8032 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
8033 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8035 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
8036 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8037 sctp_stop_timers_for_shutdown(stcb);
8038 sctp_send_shutdown(stcb,
8039 stcb->asoc.primary_destination);
8040 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
8041 stcb->sctp_ep, stcb, asoc->primary_destination);
8042 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
8043 stcb->sctp_ep, stcb, asoc->primary_destination);
8046 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
8047 (asoc->stream_queue_cnt == 0)) {
8048 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
8051 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8052 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
8053 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8054 sctp_send_shutdown_ack(stcb,
8055 stcb->asoc.primary_destination);
8057 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
8058 stcb->sctp_ep, stcb, asoc->primary_destination);
8063 * Now here we are going to recycle net_ack for a different use...
8066 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8071 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
8072 * to be done. Setting this_sack_lowest_newack to the cum_ack will
8073 * automatically ensure that.
8075 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
8076 this_sack_lowest_newack = cum_ack;
8079 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
8080 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
8082 /* JRS - Use the congestion control given in the CC module */
8083 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
8085 /******************************************************************
8086 * Here we do the stuff with ECN Nonce checking.
8087 * We basically check to see if the nonce sum flag was incorrect
8088 * or if resynchronization needs to be done. Also if we catch a
8089 * misbehaving receiver we give him the kick.
8090 ******************************************************************/
8092 if (asoc->ecn_nonce_allowed) {
8093 if (asoc->nonce_sum_check) {
8094 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
8095 if (asoc->nonce_wait_for_ecne == 0) {
8096 struct sctp_tmit_chunk *lchk;
8098 lchk = TAILQ_FIRST(&asoc->send_queue);
8099 asoc->nonce_wait_for_ecne = 1;
8101 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
8103 asoc->nonce_wait_tsn = asoc->sending_seq;
8106 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
8107 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
8109 * Misbehaving peer. We need
8110 * to react to this guy
8112 asoc->ecn_allowed = 0;
8113 asoc->ecn_nonce_allowed = 0;
8118 /* See if Resynchronization Possible */
8119 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
8120 asoc->nonce_sum_check = 1;
8122 * now we must calculate what the base is.
8123 * We do this based on two things, we know
8124 * the total's for all the segments
8125 * gap-acked in the SACK, its stored in
8126 * ecn_seg_sums. We also know the SACK's
8127 * nonce sum, its in nonce_sum_flag. So we
8128 * can build a truth table to back-calculate
8130 * asoc->nonce_sum_expect_base:
8132 * SACK-flag-Value Seg-Sums Base 0 0 0
8135 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
8139 /* Now are we exiting loss recovery ? */
8140 if (will_exit_fast_recovery) {
8141 /* Ok, we must exit fast recovery */
8142 asoc->fast_retran_loss_recovery = 0;
8144 if ((asoc->sat_t3_loss_recovery) &&
8145 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
8147 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
8148 /* end satellite t3 loss recovery */
8149 asoc->sat_t3_loss_recovery = 0;
8154 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8155 if (net->will_exit_fast_recovery) {
8156 /* Ok, we must exit fast recovery */
8157 net->fast_retran_loss_recovery = 0;
8161 /* Adjust and set the new rwnd value */
8162 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
8163 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
8164 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
8166 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
8167 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
8168 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8169 /* SWS sender side engages */
8170 asoc->peers_rwnd = 0;
8172 if (asoc->peers_rwnd > old_rwnd) {
8173 win_probe_recovery = 1;
8176 * Now we must setup so we have a timer up for anyone with
8182 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8183 if (win_probe_recovery && (net->window_probe)) {
8184 win_probe_recovered = 1;
8186 * Find first chunk that was used with
8187 * window probe and clear the event. Put
8188 * it back into the send queue as if has
8191 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8192 if (tp1->window_probe) {
8193 sctp_window_probe_recovery(stcb, asoc, net, tp1);
8198 if (net->flight_size) {
8200 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8201 stcb->sctp_ep, stcb, net);
8202 if (net->window_probe) {
8203 net->window_probe = 0;
8206 if (net->window_probe) {
8207 net->window_probe = 0;
8208 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8209 stcb->sctp_ep, stcb, net);
8210 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8211 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8213 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
8215 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8216 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8217 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
8218 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
8219 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
8225 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
8226 (asoc->sent_queue_retran_cnt == 0) &&
8227 (win_probe_recovered == 0) &&
8230 * huh, this should not happen unless all packets are
8231 * PR-SCTP and marked to skip of course.
8233 if (sctp_fs_audit(asoc)) {
8234 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8235 net->flight_size = 0;
8237 asoc->total_flight = 0;
8238 asoc->total_flight_count = 0;
8239 asoc->sent_queue_retran_cnt = 0;
8240 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8241 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
8242 sctp_flight_size_increase(tp1);
8243 sctp_total_flight_increase(stcb, tp1);
8244 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
8245 asoc->sent_queue_retran_cnt++;
8252 /*********************************************/
8253 /* Here we perform PR-SCTP procedures */
8255 /*********************************************/
8256 /* C1. update advancedPeerAckPoint */
8257 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
8258 asoc->advanced_peer_ack_point = cum_ack;
8260 /* C2. try to further move advancedPeerAckPoint ahead */
8261 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
8262 struct sctp_tmit_chunk *lchk;
8263 uint32_t old_adv_peer_ack_point;
8265 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
8266 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
8267 /* C3. See if we need to send a Fwd-TSN */
8268 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
8271 * ISSUE with ECN, see FWD-TSN processing for notes
8272 * on issues that will occur when the ECN NONCE
8273 * stuff is put into SCTP for cross checking.
8275 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
8277 send_forward_tsn(stcb, asoc);
8279 * ECN Nonce: Disable Nonce Sum check when
8280 * FWD TSN is sent and store resync tsn
8282 asoc->nonce_sum_check = 0;
8283 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
8285 /* try to FR fwd-tsn's that get lost too */
8286 lchk->rec.data.fwd_tsn_cnt++;
8287 if (lchk->rec.data.fwd_tsn_cnt > 3) {
8288 send_forward_tsn(stcb, asoc);
8289 lchk->rec.data.fwd_tsn_cnt = 0;
8294 /* Assure a timer is up */
8295 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8296 stcb->sctp_ep, stcb, lchk->whoTo);
8299 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
8300 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
8302 stcb->asoc.peers_rwnd,
8303 stcb->asoc.total_flight,
8304 stcb->asoc.total_output_queue_size);