2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
/*
 * Map a TSN to its bit offset ("gap") within a mapping array whose base is
 * mapping_tsn, accounting for 32-bit serial-number wrap at MAX_TSN: if tsn is
 * at or ahead of the base, the gap is a plain difference; otherwise the TSN
 * has wrapped and the distance is measured across the wrap point.
 * NOTE(review): this extract is missing interior lines of the macro (the
 * "} else {" arm and the closing "} while (0)") -- compare against the
 * upstream sctp_indata.c before modifying.
 */
48 #define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
49 if ((compare_with_wrap(tsn, mapping_tsn, MAX_TSN)) || \
50 (tsn == mapping_tsn)) { \
51 gap = tsn - mapping_tsn; \
53 gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
/*
 * When a TSN is recorded as "non-renegable" (nr) delivered, clear its bit in
 * the regular mapping array so it is not reported twice.  If the two arrays
 * share the same base TSN the nr_gap can be reused directly; otherwise the
 * gap must be recomputed (lgap) against mapping_array_base_tsn.
 * NOTE(review): the "} else {" arm and the closing "} while (0)" of this
 * macro are not present in this extract; lgap is presumably declared by the
 * caller -- verify against the original file.
 */
57 #define SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc) do { \
58 if (asoc->mapping_array_base_tsn == asoc->nr_mapping_array_base_tsn) { \
59 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, nr_gap); \
62 SCTP_CALC_TSN_TO_GAP(lgap, tsn, asoc->mapping_array_base_tsn); \
63 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, lgap); \
68 * NOTES: On the outbound side of things I need to check the sack timer to
69 * see if I should generate a sack into the chunk queue (if I have data to
70 * send that is and will be sending it .. for bundling.
72 * The callback in sctp_usrreq.c will get called when the socket is read from.
73 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Recompute the association's advertised receiver window and cache it in
 * asoc->my_rwnd.  Thin wrapper over sctp_calc_rwnd().
 * NOTE(review): the return-type line and the function braces are missing
 * from this extract.
 */
78 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
80 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
83 /* Calculate what the rwnd would be */
/*
 * Compute the receive window to advertise: either the full socket receive
 * buffer limit (when nothing is buffered anywhere) or the actual remaining
 * socket-buffer space minus data still held on the reassembly queue, the
 * per-stream queues, and the control-message overhead.  A Silly Window
 * Syndrome (SWS) guard keeps the result from collapsing below a minimum.
 * NOTE(review): the declaration of `calc`, the return statements, several
 * braces and else arms are missing from this extract -- do not edit without
 * the original.
 */
85 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
90 * This is really set wrong with respect to a 1-2-m socket. Since
91 * the sb_cc is the count that everyone as put up. When we re-write
92 * sctp_soreceive then we will fix this so that ONLY this
93 * associations data is taken into account.
/* No socket: cannot compute a window from its receive buffer. */
95 if (stcb->sctp_socket == NULL)
/* Nothing buffered anywhere -> grant the full window. */
98 if (stcb->asoc.sb_cc == 0 &&
99 asoc->size_on_reasm_queue == 0 &&
100 asoc->size_on_all_streams == 0) {
101 /* Full rwnd granted */
102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
105 /* get actual space */
106 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
109 * take out what has NOT been put on socket queue and we yet hold
/* Subtract data we hold that has not yet reached the socket buffer. */
112 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
113 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
119 /* what is the overhead of all these rwnd's */
120 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 * If the window gets too small due to ctrl-stuff, reduce it to 1,
123 * even it is 0. SWS engaged
125 if (calc < stcb->asoc.my_rwnd_control_len) {
134 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and populate a read-queue entry from the fields of an incoming
 * packet (TSN, PPID, stream number/sequence, flags, source net).  Takes a
 * reference on `net` (atomic_add_int on its ref_count); ownership of the
 * entry passes to the caller.  Returns NULL when allocation fails.
 * NOTE(review): the body assigns `dm` (the data mbuf) but the visible
 * parameter list ends at `flags,` -- the trailing `struct mbuf *dm`
 * parameter line appears to be missing from this extract.  The `context`
 * parameter is visibly unused here (sinfo_context is taken from the asoc).
 */
136 struct sctp_queued_to_read *
137 sctp_build_readq_entry(struct sctp_tcb *stcb,
138 struct sctp_nets *net,
139 uint32_t tsn, uint32_t ppid,
140 uint32_t context, uint16_t stream_no,
141 uint16_t stream_seq, uint8_t flags,
144 struct sctp_queued_to_read *read_queue_e = NULL;
146 sctp_alloc_a_readq(stcb, read_queue_e);
147 if (read_queue_e == NULL) {
/* Fill in the sinfo fields callers will see via recvmsg ancillary data. */
150 read_queue_e->sinfo_stream = stream_no;
151 read_queue_e->sinfo_ssn = stream_seq;
152 read_queue_e->sinfo_flags = (flags << 8);
153 read_queue_e->sinfo_ppid = ppid;
154 read_queue_e->sinfo_context = stcb->asoc.context;
155 read_queue_e->sinfo_timetolive = 0;
156 read_queue_e->sinfo_tsn = tsn;
157 read_queue_e->sinfo_cumtsn = tsn;
158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 read_queue_e->whoFrom = net;
160 read_queue_e->length = 0;
/* Hold a reference on the source net for the lifetime of this entry. */
161 atomic_add_int(&net->ref_count, 1);
162 read_queue_e->data = dm;
163 read_queue_e->spec_flags = 0;
164 read_queue_e->tail_mbuf = NULL;
165 read_queue_e->aux_data = NULL;
166 read_queue_e->stcb = stcb;
167 read_queue_e->port_from = stcb->rport;
168 read_queue_e->do_not_ref_stcb = 0;
169 read_queue_e->end_added = 0;
170 read_queue_e->some_taken = 0;
171 read_queue_e->pdapi_aborted = 0;
173 return (read_queue_e);
178 * Build out our readq entry based on the incoming packet.
/*
 * Same as sctp_build_readq_entry(), but sourced from a reassembly chunk
 * (sctp_tmit_chunk): copies its TSN, PPID, stream number/sequence and flags,
 * takes over chk->data, and takes a reference on chk->whoTo.  Returns NULL
 * when allocation fails.
 * NOTE(review): braces and the NULL-return line are missing from this
 * extract.
 */
180 static struct sctp_queued_to_read *
181 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
182 struct sctp_tmit_chunk *chk)
184 struct sctp_queued_to_read *read_queue_e = NULL;
186 sctp_alloc_a_readq(stcb, read_queue_e);
187 if (read_queue_e == NULL) {
190 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
191 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
192 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
193 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
194 read_queue_e->sinfo_context = stcb->asoc.context;
195 read_queue_e->sinfo_timetolive = 0;
196 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
197 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
198 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
199 read_queue_e->whoFrom = chk->whoTo;
200 read_queue_e->aux_data = NULL;
201 read_queue_e->length = 0;
/* Reference the chunk's destination net for the entry's lifetime. */
202 atomic_add_int(&chk->whoTo->ref_count, 1);
203 read_queue_e->data = chk->data;
204 read_queue_e->tail_mbuf = NULL;
205 read_queue_e->stcb = stcb;
206 read_queue_e->port_from = stcb->rport;
207 read_queue_e->spec_flags = 0;
208 read_queue_e->do_not_ref_stcb = 0;
209 read_queue_e->end_added = 0;
210 read_queue_e->some_taken = 0;
211 read_queue_e->pdapi_aborted = 0;
213 return (read_queue_e);
/*
 * Build an mbuf carrying a control message (cmsghdr + sctp_sndrcvinfo, or
 * sctp_extrcvinfo when the EXT_RCVINFO feature is on) for delivery as
 * ancillary data.  Nothing is built when RECVDATAIOEVNT is off.
 * NOTE(review): the return-type line, `ret`/`len`/`cmh` declarations,
 * return statements, and several else arms are missing from this extract;
 * `use_extended` is declared but its assignment is not visible.
 */
218 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
219 struct sctp_sndrcvinfo *sinfo)
221 struct sctp_sndrcvinfo *outinfo;
225 int use_extended = 0;
227 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
228 /* user does not want the sndrcv ctl */
/* Size the cmsg for the extended or plain sndrcvinfo layout. */
231 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
233 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
235 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
239 ret = sctp_get_mbuf_for_msg(len,
240 0, M_DONTWAIT, 1, MT_DATA);
246 /* We need a CMSG header followed by the struct */
247 cmh = mtod(ret, struct cmsghdr *);
248 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
249 cmh->cmsg_level = IPPROTO_SCTP;
251 cmh->cmsg_type = SCTP_EXTRCV;
253 memcpy(outinfo, sinfo, len);
255 cmh->cmsg_type = SCTP_SNDRCV;
259 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
/*
 * Heap-allocated (SCTP_MALLOC) variant of sctp_build_ctl_nchunk(): builds
 * the same cmsghdr + sndrcvinfo/extrcvinfo control message into a malloc'd
 * buffer instead of an mbuf.  Nothing is built when RECVDATAIOEVNT is off.
 * NOTE(review): the return-type line, `buf`/`len`/`cmh` declarations,
 * return statements, and several else arms are missing from this extract.
 */
265 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
267 struct sctp_sndrcvinfo *sinfo)
269 struct sctp_sndrcvinfo *outinfo;
273 int use_extended = 0;
275 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
276 /* user does not want the sndrcv ctl */
279 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
281 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
283 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
285 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
290 /* We need a CMSG header followed by the struct */
291 cmh = (struct cmsghdr *)buf;
292 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
293 cmh->cmsg_level = IPPROTO_SCTP;
295 cmh->cmsg_type = SCTP_EXTRCV;
297 memcpy(outinfo, sinfo, len);
299 cmh->cmsg_type = SCTP_SNDRCV;
309 * We are delivering currently from the reassembly queue. We must continue to
310 * deliver until we either: 1) run out of space. 2) run out of sequential
311 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Drain the reassembly queue toward the socket: deliver chunks in TSN order
 * (stopping at a gap, an out-of-order stream sequence, or the LAST_FRAG of a
 * message), maintain the partial-delivery (PD-API) state in control_pdapi,
 * and -- when NR-SACK is in use -- mark each delivered TSN in the nr mapping
 * array.  If the socket is gone, the whole queue is simply freed.
 * NOTE(review): this extract is missing many interior lines (loop headers,
 * braces, else arms, `end`/`nxt_todel`/`cntDel` declarations); treat the
 * structure below as indicative only and consult the upstream file.
 */
314 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
316 struct sctp_tmit_chunk *chk;
322 /* EY if any out-of-order delivered, then tag it nr on nr_map */
323 uint32_t nr_tsn, nr_gap;
325 struct sctp_queued_to_read *control, *ctl, *ctlat;
330 cntDel = stream_no = 0;
/* Socket dead/dying: nothing can be delivered, purge the queue. */
331 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
332 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
333 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
334 /* socket above is long gone or going.. */
336 asoc->fragmented_delivery_inprogress = 0;
337 chk = TAILQ_FIRST(&asoc->reasmqueue);
339 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
340 asoc->size_on_reasm_queue -= chk->send_size;
341 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
343 * Lose the data pointer, since its in the socket
347 sctp_m_freem(chk->data);
350 /* Now free the address and data */
351 sctp_free_a_chunk(stcb, chk);
352 /* sa_ignore FREED_MEMORY */
353 chk = TAILQ_FIRST(&asoc->reasmqueue);
357 SCTP_TCB_LOCK_ASSERT(stcb);
359 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Stop on a TSN gap: only strictly sequential TSNs are deliverable. */
363 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
364 /* Can't deliver more :< */
367 stream_no = chk->rec.data.stream_number;
368 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
/* Ordered data must also be the next SSN in its stream. */
369 if (nxt_todel != chk->rec.data.stream_seq &&
370 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
372 * Not the next sequence to deliver in its stream OR
/* FIRST_FRAG: start a new readq entry and remember it for PD-API. */
377 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
379 control = sctp_build_readq_entry_chk(stcb, chk);
380 if (control == NULL) {
384 /* save it off for our future deliveries */
385 stcb->asoc.control_pdapi = control;
386 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
390 sctp_add_to_readq(stcb->sctp_ep,
391 stcb, control, &stcb->sctp_socket->so_rcv, end,
392 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment: append to the in-progress PD-API entry. */
395 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
399 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
400 stcb->asoc.control_pdapi,
401 chk->data, end, chk->rec.data.TSN_seq,
402 &stcb->sctp_socket->so_rcv)) {
404 * something is very wrong, either
405 * control_pdapi is NULL, or the tail_mbuf
406 * is corrupt, or there is a EOM already on
409 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
413 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
414 panic("This should not happen control_pdapi NULL?");
416 /* if we did not panic, it was a EOM */
417 panic("Bad chunking ??");
419 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
420 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
422 SCTP_PRINTF("Bad chunking ??\n");
423 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
431 /* pull it we did it */
432 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
434 * EY this is the chunk that should be tagged nr gapped
435 * calculate the gap and such then tag this TSN nr
436 * chk->rec.data.TSN_seq
439 * EY!-TODO- this tsn should be tagged nr only if it is
440 * out-of-order, the if statement should be modified
/* NR-SACK: mark the delivered TSN in the nr mapping array. */
442 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
444 nr_tsn = chk->rec.data.TSN_seq;
445 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
/* NOTE(review): both arms of this || are identical -- likely a
 * copy/paste artifact (elsewhere the first arm uses
 * SCTP_NR_MAPPING_ARRAY); verify against the original. */
446 if ((nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3)) ||
447 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
449 * EY The 1st should never happen, as in
450 * process_a_data_chunk method this check
454 * EY The 2nd should never happen, because
455 * nr_mapping_array is always expanded when
456 * mapping_array is expanded
458 printf("Impossible nr_gap ack range failed\n");
460 SCTP_TCB_LOCK_ASSERT(stcb);
461 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
462 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
463 if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
464 asoc->highest_tsn_inside_nr_map = nr_tsn;
/* LAST_FRAG ends the fragmented message; bump stream delivery state. */
467 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
468 asoc->fragmented_delivery_inprogress = 0;
469 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
470 asoc->strmin[stream_no].last_sequence_delivered++;
472 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
473 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
475 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
477 * turn the flag back on since we just delivered
480 asoc->fragmented_delivery_inprogress = 1;
/* Record what was just delivered for PD-API bookkeeping. */
482 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
483 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
484 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
485 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
487 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
488 asoc->size_on_reasm_queue -= chk->send_size;
489 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
490 /* free up the chk */
492 sctp_free_a_chunk(stcb, chk);
/* Message complete: try to flush queued ordered data on this stream. */
494 if (asoc->fragmented_delivery_inprogress == 0) {
496 * Now lets see if we can deliver the next one on
499 struct sctp_stream_in *strm;
501 strm = &asoc->strmin[stream_no];
502 nxt_todel = strm->last_sequence_delivered + 1;
503 ctl = TAILQ_FIRST(&strm->inqueue);
504 if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
505 while (ctl != NULL) {
506 /* Deliver more if we can. */
507 if (nxt_todel == ctl->sinfo_ssn) {
508 ctlat = TAILQ_NEXT(ctl, next);
509 TAILQ_REMOVE(&strm->inqueue, ctl, next);
510 asoc->size_on_all_streams -= ctl->length;
511 sctp_ucount_decr(asoc->cnt_on_all_streams);
512 strm->last_sequence_delivered++;
517 nr_tsn = ctl->sinfo_tsn;
518 sctp_add_to_readq(stcb->sctp_ep, stcb,
520 &stcb->sctp_socket->so_rcv, 1,
521 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
523 * EY -now something is
524 * delivered, calculate
525 * nr_gap and tag this tsn
528 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
529 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
530 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
531 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
532 printf("Impossible NR gap calculation?\n");
565 SCTP_TCB_LOCK_ASSERT(stcb);
566 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
567 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
568 if (compare_with_wrap(nr_tsn,
569 asoc->highest_tsn_inside_nr_map,
571 asoc->highest_tsn_inside_nr_map = nr_tsn;
578 nxt_todel = strm->last_sequence_delivered + 1;
583 /* sa_ignore FREED_MEMORY */
584 chk = TAILQ_FIRST(&asoc->reasmqueue);
589 * Queue the chunk either right into the socket buffer if it is the next one
590 * to go OR put it in the correct place in the delivery queue. If we do
591 * append to the so_buf, keep doing so until we are out of order. One big
592 * question still remains, what to do when the socket buffer is FULL??
/*
 * Place an ordered data chunk's readq entry either directly into the socket
 * receive buffer (when it is the next SSN for its stream, then drain any now
 * in-order successors) or into its SSN-sorted place on the stream's inqueue.
 * A chunk whose SSN is at or behind last_sequence_delivered is a protocol
 * violation and aborts the association.  Under NR-SACK, each TSN actually
 * delivered is tagged in the nr mapping array.
 * NOTE(review): this extract is missing many interior lines (braces, else
 * arms, `queue_needed`/`nxt_todel`/`oper` declarations, break statements);
 * treat the flow below as indicative and consult the upstream file.
 */
595 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
596 struct sctp_queued_to_read *control, int *abort_flag)
599 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
600 * all the data in one stream this could happen quite rapidly. One
601 * could use the TSN to keep track of things, but this scheme breaks
602 * down in the other type of stream useage that could occur. Send a
603 * single msg to stream 0, send 4Billion messages to stream 1, now
604 * send a message to stream 0. You have a situation where the TSN
605 * has wrapped but not in the stream. Is this worth worrying about
606 * or should we just change our queue sort at the bottom to be by
609 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
610 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
611 * assignment this could happen... and I don't see how this would be
612 * a violation. So for now I am undecided an will leave the sort by
613 * SSN alone. Maybe a hybred approach is the answer
616 struct sctp_stream_in *strm;
617 struct sctp_queued_to_read *at;
622 /* EY- will be used to calculate nr-gap for a tsn */
623 uint32_t nr_tsn, nr_gap;
/* Provisionally account the entry on the per-stream totals. */
626 asoc->size_on_all_streams += control->length;
627 sctp_ucount_incr(asoc->cnt_on_all_streams);
628 strm = &asoc->strmin[control->sinfo_stream];
629 nxt_todel = strm->last_sequence_delivered + 1;
630 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
631 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
633 SCTPDBG(SCTP_DEBUG_INDATA1,
634 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
635 (uint32_t) control->sinfo_stream,
636 (uint32_t) strm->last_sequence_delivered,
637 (uint32_t) nxt_todel);
/* SSN at/behind the last delivered one: duplicate -> abort association. */
638 if (compare_with_wrap(strm->last_sequence_delivered,
639 control->sinfo_ssn, MAX_SEQ) ||
640 (strm->last_sequence_delivered == control->sinfo_ssn)) {
641 /* The incoming sseq is behind where we last delivered? */
642 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
643 control->sinfo_ssn, strm->last_sequence_delivered);
646 * throw it in the stream so it gets cleaned up in
647 * association destruction
649 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Build a PROTOCOL_VIOLATION error cause carrying TSN and stream/SSN. */
650 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
651 0, M_DONTWAIT, 1, MT_DATA);
653 struct sctp_paramhdr *ph;
656 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
657 (sizeof(uint32_t) * 3);
658 ph = mtod(oper, struct sctp_paramhdr *);
659 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
660 ph->param_length = htons(SCTP_BUF_LEN(oper));
661 ippp = (uint32_t *) (ph + 1);
662 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
664 *ippp = control->sinfo_tsn;
666 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
668 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
669 sctp_abort_an_association(stcb->sctp_ep, stcb,
670 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* Exactly the next expected SSN: deliver straight to the socket. */
676 if (nxt_todel == control->sinfo_ssn) {
677 /* can be delivered right away? */
678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
679 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
681 /* EY it wont be queued if it could be delivered directly */
683 asoc->size_on_all_streams -= control->length;
684 sctp_ucount_decr(asoc->cnt_on_all_streams);
685 strm->last_sequence_delivered++;
686 /* EY will be used to calculate nr-gap */
687 nr_tsn = control->sinfo_tsn;
688 sctp_add_to_readq(stcb->sctp_ep, stcb,
690 &stcb->sctp_socket->so_rcv, 1,
691 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
693 * EY this is the chunk that should be tagged nr gapped
694 * calculate the gap and such then tag this TSN nr
695 * chk->rec.data.TSN_seq
697 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
698 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
699 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
700 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
701 printf("Impossible nr_tsn set 2?\n");
703 * EY The 1st should never happen, as in
704 * process_a_data_chunk method this check
708 * EY The 2nd should never happen, because
709 * nr_mapping_array is always expanded when
710 * mapping_array is expanded
713 SCTP_TCB_LOCK_ASSERT(stcb);
714 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
715 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
716 if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
717 asoc->highest_tsn_inside_nr_map = nr_tsn;
/* Drain any queued entries that are now in order behind this one. */
720 control = TAILQ_FIRST(&strm->inqueue);
721 while (control != NULL) {
723 nxt_todel = strm->last_sequence_delivered + 1;
724 if (nxt_todel == control->sinfo_ssn) {
725 at = TAILQ_NEXT(control, next);
726 TAILQ_REMOVE(&strm->inqueue, control, next);
727 asoc->size_on_all_streams -= control->length;
728 sctp_ucount_decr(asoc->cnt_on_all_streams);
729 strm->last_sequence_delivered++;
731 * We ignore the return of deliver_data here
732 * since we always can hold the chunk on the
733 * d-queue. And we have a finite number that
734 * can be delivered from the strq.
736 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
737 sctp_log_strm_del(control, NULL,
738 SCTP_STR_LOG_FROM_IMMED_DEL);
740 /* EY will be used to calculate nr-gap */
741 nr_tsn = control->sinfo_tsn;
742 sctp_add_to_readq(stcb->sctp_ep, stcb,
744 &stcb->sctp_socket->so_rcv, 1,
745 SCTP_READ_LOCK_NOT_HELD,
748 * EY this is the chunk that should be
749 * tagged nr gapped calculate the gap and
750 * such then tag this TSN nr
751 * chk->rec.data.TSN_seq
753 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
754 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
755 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
756 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
757 printf("Impossible nr TSN set 3?\n");
759 * EY The 1st should never
761 * process_a_data_chunk
762 * method this check should
766 * EY The 2nd should never
768 * nr_mapping_array is
769 * always expanded when
770 * mapping_array is expanded
773 SCTP_TCB_LOCK_ASSERT(stcb);
774 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
775 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
776 if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
778 asoc->highest_tsn_inside_nr_map = nr_tsn;
/* Not deliverable now: insert into the SSN-sorted stream inqueue. */
789 * Ok, we did not deliver this guy, find the correct place
790 * to put it on the queue.
792 if ((compare_with_wrap(asoc->cumulative_tsn,
793 control->sinfo_tsn, MAX_TSN)) ||
794 (control->sinfo_tsn == asoc->cumulative_tsn)) {
797 if (TAILQ_EMPTY(&strm->inqueue)) {
799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
800 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
802 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
804 TAILQ_FOREACH(at, &strm->inqueue, next) {
805 if (compare_with_wrap(at->sinfo_ssn,
806 control->sinfo_ssn, MAX_SEQ)) {
808 * one in queue is bigger than the
809 * new one, insert before this one
811 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
812 sctp_log_strm_del(control, at,
813 SCTP_STR_LOG_FROM_INSERT_MD);
815 TAILQ_INSERT_BEFORE(at, control, next);
/* Duplicate SSN already queued: drop the new entry and free it. */
817 } else if (at->sinfo_ssn == control->sinfo_ssn) {
819 * Gak, He sent me a duplicate str
823 * foo bar, I guess I will just free
824 * this new guy, should we abort
825 * too? FIX ME MAYBE? Or it COULD be
826 * that the SSN's have wrapped.
827 * Maybe I should compare to TSN
828 * somehow... sigh for now just blow
833 sctp_m_freem(control->data);
834 control->data = NULL;
835 asoc->size_on_all_streams -= control->length;
836 sctp_ucount_decr(asoc->cnt_on_all_streams);
837 if (control->whoFrom)
838 sctp_free_remote_addr(control->whoFrom);
839 control->whoFrom = NULL;
840 sctp_free_a_readq(stcb, control);
843 if (TAILQ_NEXT(at, next) == NULL) {
845 * We are at the end, insert
848 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
849 sctp_log_strm_del(control, at,
850 SCTP_STR_LOG_FROM_INSERT_TL);
852 TAILQ_INSERT_AFTER(&strm->inqueue,
863 * Returns two things: You get the total size of the deliverable parts of the
864 * first fragmented message on the reassembly queue. And you get a 1 back if
865 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Walk the head of the reassembly queue accumulating *t_size, the number of
 * deliverable bytes of the first fragmented message (consecutive TSNs
 * starting at a FIRST_FRAG).  Presumably returns 1 if the LAST_FRAG is
 * reached (message complete) and 0 otherwise -- the return statements are
 * missing from this extract, as are the `tsn` increment and loop framing.
 */
868 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
870 struct sctp_tmit_chunk *chk;
874 chk = TAILQ_FIRST(&asoc->reasmqueue);
876 /* nothing on the queue */
879 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
880 /* Not a first on the queue */
883 tsn = chk->rec.data.TSN_seq;
/* Stop at the first TSN gap: the rest is not yet contiguous. */
885 if (tsn != chk->rec.data.TSN_seq) {
888 *t_size += chk->send_size;
889 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
893 chk = TAILQ_NEXT(chk, sctp_next);
/*
 * Decide whether reassembly delivery can start (or continue) and drive
 * sctp_service_reassembly() accordingly.  Delivery starts when the head of
 * the reassembly queue is a FIRST_FRAG that is either unordered or next in
 * its stream, and either the whole message is present or enough bytes have
 * accumulated to cross the partial-delivery point.  tsn_last_delivered is
 * backed down by one so service_reassembly's sequential-TSN check accepts
 * the first chunk.
 * NOTE(review): braces, `nxt_todel`/`tsize` declarations, and the
 * empty-queue early return are missing from this extract.
 */
899 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
901 struct sctp_tmit_chunk *chk;
906 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the accounting to a known-clean state. */
909 asoc->size_on_reasm_queue = 0;
910 asoc->cnt_on_reasm_queue = 0;
913 if (asoc->fragmented_delivery_inprogress == 0) {
915 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
916 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
917 (nxt_todel == chk->rec.data.stream_seq ||
918 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
920 * Yep the first one is here and its ok to deliver
923 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
924 (tsize >= stcb->sctp_ep->partial_delivery_point))) {
927 * Yes, we setup to start reception, by
928 * backing down the TSN just in case we
929 * can't deliver. If we
931 asoc->fragmented_delivery_inprogress = 1;
932 asoc->tsn_last_delivered =
933 chk->rec.data.TSN_seq - 1;
935 chk->rec.data.stream_number;
936 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
937 asoc->pdapi_ppid = chk->rec.data.payloadtype;
938 asoc->fragment_flags = chk->rec.data.rcv_flags;
939 sctp_service_reassembly(stcb, asoc);
/* Delivery already in progress: keep servicing the queue. */
944 * Service re-assembly will deliver stream data queued at
945 * the end of fragmented delivery.. but it wont know to go
946 * back and call itself again... we do that here with the
949 sctp_service_reassembly(stcb, asoc);
950 if (asoc->fragmented_delivery_inprogress == 0) {
952 * finished our Fragmented delivery, could be more
961 * Dump onto the re-assembly queue, in its proper place. After dumping on the
962 * queue, see if anthing can be delivered. If so pull it off (or as much as
963 * we can. If we run out of space then we must dump what we can and set the
964 * appropriate flag to say we queued what we could.
967 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
968 struct sctp_tmit_chunk *chk, int *abort_flag)
971 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
973 struct sctp_tmit_chunk *at, *prev, *next;
976 cum_ackp1 = asoc->tsn_last_delivered + 1;
977 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
978 /* This is the first one on the queue */
979 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
981 * we do not check for delivery of anything when only one
984 asoc->size_on_reasm_queue = chk->send_size;
985 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
986 if (chk->rec.data.TSN_seq == cum_ackp1) {
987 if (asoc->fragmented_delivery_inprogress == 0 &&
988 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
989 SCTP_DATA_FIRST_FRAG) {
991 * An empty queue, no delivery inprogress,
992 * we hit the next one and it does NOT have
993 * a FIRST fragment mark.
995 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
996 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
997 0, M_DONTWAIT, 1, MT_DATA);
1000 struct sctp_paramhdr *ph;
1003 SCTP_BUF_LEN(oper) =
1004 sizeof(struct sctp_paramhdr) +
1005 (sizeof(uint32_t) * 3);
1006 ph = mtod(oper, struct sctp_paramhdr *);
1008 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1009 ph->param_length = htons(SCTP_BUF_LEN(oper));
1010 ippp = (uint32_t *) (ph + 1);
1011 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
1013 *ippp = chk->rec.data.TSN_seq;
1015 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1018 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
1019 sctp_abort_an_association(stcb->sctp_ep, stcb,
1020 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1022 } else if (asoc->fragmented_delivery_inprogress &&
1023 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1025 * We are doing a partial delivery and the
1026 * NEXT chunk MUST be either the LAST or
1027 * MIDDLE fragment NOT a FIRST
1029 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
1030 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1031 0, M_DONTWAIT, 1, MT_DATA);
1033 struct sctp_paramhdr *ph;
1036 SCTP_BUF_LEN(oper) =
1037 sizeof(struct sctp_paramhdr) +
1038 (3 * sizeof(uint32_t));
1039 ph = mtod(oper, struct sctp_paramhdr *);
1041 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1042 ph->param_length = htons(SCTP_BUF_LEN(oper));
1043 ippp = (uint32_t *) (ph + 1);
1044 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
1046 *ippp = chk->rec.data.TSN_seq;
1048 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1050 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
1051 sctp_abort_an_association(stcb->sctp_ep, stcb,
1052 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1054 } else if (asoc->fragmented_delivery_inprogress) {
1056 * Here we are ok with a MIDDLE or LAST
1059 if (chk->rec.data.stream_number !=
1060 asoc->str_of_pdapi) {
1061 /* Got to be the right STR No */
1062 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
1063 chk->rec.data.stream_number,
1064 asoc->str_of_pdapi);
1065 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1066 0, M_DONTWAIT, 1, MT_DATA);
1068 struct sctp_paramhdr *ph;
1071 SCTP_BUF_LEN(oper) =
1072 sizeof(struct sctp_paramhdr) +
1073 (sizeof(uint32_t) * 3);
1075 struct sctp_paramhdr *);
1077 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1079 htons(SCTP_BUF_LEN(oper));
1080 ippp = (uint32_t *) (ph + 1);
1081 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
1083 *ippp = chk->rec.data.TSN_seq;
1085 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1087 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
1088 sctp_abort_an_association(stcb->sctp_ep,
1089 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1091 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
1092 SCTP_DATA_UNORDERED &&
1093 chk->rec.data.stream_seq !=
1094 asoc->ssn_of_pdapi) {
1095 /* Got to be the right STR Seq */
1096 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
1097 chk->rec.data.stream_seq,
1098 asoc->ssn_of_pdapi);
1099 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1100 0, M_DONTWAIT, 1, MT_DATA);
1102 struct sctp_paramhdr *ph;
1105 SCTP_BUF_LEN(oper) =
1106 sizeof(struct sctp_paramhdr) +
1107 (3 * sizeof(uint32_t));
1109 struct sctp_paramhdr *);
1111 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1113 htons(SCTP_BUF_LEN(oper));
1114 ippp = (uint32_t *) (ph + 1);
1115 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1117 *ippp = chk->rec.data.TSN_seq;
1119 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1122 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1123 sctp_abort_an_association(stcb->sctp_ep,
1124 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1131 /* Find its place */
1132 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1133 if (compare_with_wrap(at->rec.data.TSN_seq,
1134 chk->rec.data.TSN_seq, MAX_TSN)) {
1136 * one in queue is bigger than the new one, insert
1140 asoc->size_on_reasm_queue += chk->send_size;
1141 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1143 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1145 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1146 /* Gak, He sent me a duplicate str seq number */
1148 * foo bar, I guess I will just free this new guy,
1149 * should we abort too? FIX ME MAYBE? Or it COULD be
1150 * that the SSN's have wrapped. Maybe I should
1151 * compare to TSN somehow... sigh for now just blow
1155 sctp_m_freem(chk->data);
1158 sctp_free_a_chunk(stcb, chk);
1161 last_flags = at->rec.data.rcv_flags;
1162 last_tsn = at->rec.data.TSN_seq;
1164 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1166 * We are at the end, insert it after this
1169 /* check it first */
1170 asoc->size_on_reasm_queue += chk->send_size;
1171 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1172 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1177 /* Now the audits */
1179 prev_tsn = chk->rec.data.TSN_seq - 1;
1180 if (prev_tsn == prev->rec.data.TSN_seq) {
1182 * Ok the one I am dropping onto the end is the
1183 * NEXT. A bit of validation here.
1185 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1186 SCTP_DATA_FIRST_FRAG ||
1187 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1188 SCTP_DATA_MIDDLE_FRAG) {
1190 * Insert chk MUST be a MIDDLE or LAST
1193 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1194 SCTP_DATA_FIRST_FRAG) {
1195 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1196 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1197 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1198 0, M_DONTWAIT, 1, MT_DATA);
1200 struct sctp_paramhdr *ph;
1203 SCTP_BUF_LEN(oper) =
1204 sizeof(struct sctp_paramhdr) +
1205 (3 * sizeof(uint32_t));
1207 struct sctp_paramhdr *);
1209 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1211 htons(SCTP_BUF_LEN(oper));
1212 ippp = (uint32_t *) (ph + 1);
1213 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1215 *ippp = chk->rec.data.TSN_seq;
1217 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1220 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1221 sctp_abort_an_association(stcb->sctp_ep,
1222 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1226 if (chk->rec.data.stream_number !=
1227 prev->rec.data.stream_number) {
1229 * Huh, need the correct STR here,
1230 * they must be the same.
1232 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1233 chk->rec.data.stream_number,
1234 prev->rec.data.stream_number);
1235 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1236 0, M_DONTWAIT, 1, MT_DATA);
1238 struct sctp_paramhdr *ph;
1241 SCTP_BUF_LEN(oper) =
1242 sizeof(struct sctp_paramhdr) +
1243 (3 * sizeof(uint32_t));
1245 struct sctp_paramhdr *);
1247 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1249 htons(SCTP_BUF_LEN(oper));
1250 ippp = (uint32_t *) (ph + 1);
1251 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1253 *ippp = chk->rec.data.TSN_seq;
1255 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1257 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1258 sctp_abort_an_association(stcb->sctp_ep,
1259 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1264 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1265 chk->rec.data.stream_seq !=
1266 prev->rec.data.stream_seq) {
1268 * Huh, need the correct STR here,
1269 * they must be the same.
1271 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1272 chk->rec.data.stream_seq,
1273 prev->rec.data.stream_seq);
1274 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1275 0, M_DONTWAIT, 1, MT_DATA);
1277 struct sctp_paramhdr *ph;
1280 SCTP_BUF_LEN(oper) =
1281 sizeof(struct sctp_paramhdr) +
1282 (3 * sizeof(uint32_t));
1284 struct sctp_paramhdr *);
1286 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1288 htons(SCTP_BUF_LEN(oper));
1289 ippp = (uint32_t *) (ph + 1);
1290 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1292 *ippp = chk->rec.data.TSN_seq;
1294 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1296 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1297 sctp_abort_an_association(stcb->sctp_ep,
1298 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1303 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1304 SCTP_DATA_LAST_FRAG) {
1305 /* Insert chk MUST be a FIRST */
1306 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1307 SCTP_DATA_FIRST_FRAG) {
1308 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1309 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1310 0, M_DONTWAIT, 1, MT_DATA);
1312 struct sctp_paramhdr *ph;
1315 SCTP_BUF_LEN(oper) =
1316 sizeof(struct sctp_paramhdr) +
1317 (3 * sizeof(uint32_t));
1319 struct sctp_paramhdr *);
1321 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1323 htons(SCTP_BUF_LEN(oper));
1324 ippp = (uint32_t *) (ph + 1);
1325 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1327 *ippp = chk->rec.data.TSN_seq;
1329 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1332 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1333 sctp_abort_an_association(stcb->sctp_ep,
1334 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1343 post_tsn = chk->rec.data.TSN_seq + 1;
1344 if (post_tsn == next->rec.data.TSN_seq) {
1346 * Ok the one I am inserting ahead of is my NEXT
1347 * one. A bit of validation here.
1349 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1350 /* Insert chk MUST be a last fragment */
1351 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1352 != SCTP_DATA_LAST_FRAG) {
1353 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1354 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1355 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1356 0, M_DONTWAIT, 1, MT_DATA);
1358 struct sctp_paramhdr *ph;
1361 SCTP_BUF_LEN(oper) =
1362 sizeof(struct sctp_paramhdr) +
1363 (3 * sizeof(uint32_t));
1365 struct sctp_paramhdr *);
1367 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1369 htons(SCTP_BUF_LEN(oper));
1370 ippp = (uint32_t *) (ph + 1);
1371 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1373 *ippp = chk->rec.data.TSN_seq;
1375 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1377 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1378 sctp_abort_an_association(stcb->sctp_ep,
1379 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1384 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1385 SCTP_DATA_MIDDLE_FRAG ||
1386 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1387 SCTP_DATA_LAST_FRAG) {
1389 * Insert chk CAN be MIDDLE or FIRST NOT
1392 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1393 SCTP_DATA_LAST_FRAG) {
1394 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1395 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1396 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1397 0, M_DONTWAIT, 1, MT_DATA);
1399 struct sctp_paramhdr *ph;
1402 SCTP_BUF_LEN(oper) =
1403 sizeof(struct sctp_paramhdr) +
1404 (3 * sizeof(uint32_t));
1406 struct sctp_paramhdr *);
1408 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1410 htons(SCTP_BUF_LEN(oper));
1411 ippp = (uint32_t *) (ph + 1);
1412 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1414 *ippp = chk->rec.data.TSN_seq;
1416 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1419 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1420 sctp_abort_an_association(stcb->sctp_ep,
1421 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1426 if (chk->rec.data.stream_number !=
1427 next->rec.data.stream_number) {
1429 * Huh, need the correct STR here,
1430 * they must be the same.
1432 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1433 chk->rec.data.stream_number,
1434 next->rec.data.stream_number);
1435 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1436 0, M_DONTWAIT, 1, MT_DATA);
1438 struct sctp_paramhdr *ph;
1441 SCTP_BUF_LEN(oper) =
1442 sizeof(struct sctp_paramhdr) +
1443 (3 * sizeof(uint32_t));
1445 struct sctp_paramhdr *);
1447 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1449 htons(SCTP_BUF_LEN(oper));
1450 ippp = (uint32_t *) (ph + 1);
1451 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1453 *ippp = chk->rec.data.TSN_seq;
1455 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1458 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1459 sctp_abort_an_association(stcb->sctp_ep,
1460 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1465 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1466 chk->rec.data.stream_seq !=
1467 next->rec.data.stream_seq) {
1469 * Huh, need the correct STR here,
1470 * they must be the same.
1472 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1473 chk->rec.data.stream_seq,
1474 next->rec.data.stream_seq);
1475 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1476 0, M_DONTWAIT, 1, MT_DATA);
1478 struct sctp_paramhdr *ph;
1481 SCTP_BUF_LEN(oper) =
1482 sizeof(struct sctp_paramhdr) +
1483 (3 * sizeof(uint32_t));
1485 struct sctp_paramhdr *);
1487 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1489 htons(SCTP_BUF_LEN(oper));
1490 ippp = (uint32_t *) (ph + 1);
1491 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1493 *ippp = chk->rec.data.TSN_seq;
1495 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1497 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1498 sctp_abort_an_association(stcb->sctp_ep,
1499 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1507 /* Do we need to do some delivery? check */
1508 sctp_deliver_reasm_check(stcb, asoc);
1512 * This is an unfortunate routine. It checks to make sure an evil guy is not
1513 * stuffing us full of bad packet fragments. A broken peer could also do this
1514 * but this is doubtful. It is too bad I must worry about evil crackers, sigh
/*
 * Walk the association's reassembly queue (kept in TSN order) and decide
 * whether the given TSN would have to be part of a fragmented message that
 * is already queued. Used by the data-chunk input path to detect a broken
 * or malicious peer that sends a complete (non-fragmented) chunk whose TSN
 * falls inside an in-progress fragment train.
 *
 * NOTE(review): this excerpt elides several original lines (the TSN
 * parameter line, local declarations, the return statements and closing
 * braces), so the comments below describe only the logic that is visible.
 */
1518 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
/* 'at' is the cursor over the queued fragments */
1521 struct sctp_tmit_chunk *at;
1524 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
/* case 1: the queued TSN is smaller than the new TSN (serial-number wrap aware) */
1525 if (compare_with_wrap(TSN_seq,
1526 at->rec.data.TSN_seq, MAX_TSN)) {
1527 /* is it one bigger? */
1528 tsn_est = at->rec.data.TSN_seq + 1;
/* the new TSN sits immediately after 'at' */
1529 if (tsn_est == TSN_seq) {
1530 /* yep. It better be a last then */
1531 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1532 SCTP_DATA_LAST_FRAG) {
1534 * Ok this guy belongs next to a guy
1535 * that is NOT last, it should be a
1536 * middle/last, not a complete
1542 * This guy is ok since its a LAST
1543 * and the new chunk is a fully
1544 * self- contained one.
/* case 2: exact TSN match with a queued fragment — duplicate */
1549 } else if (TSN_seq == at->rec.data.TSN_seq) {
1550 /* Software error since I have a dup? */
1554 * Ok, 'at' is larger than the new chunk, but does the
1555 * new chunk need to sit right before it?
1557 tsn_est = TSN_seq + 1;
/* the new TSN sits immediately before 'at' */
1558 if (tsn_est == at->rec.data.TSN_seq) {
1559 /* Yep, It better be a first */
1560 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1561 SCTP_DATA_FIRST_FRAG) {
1574 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1575 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1576 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1577 int *break_flag, int last_chunk)
1579 /* Process a data chunk */
1580 /* struct sctp_tmit_chunk *chk; */
1581 struct sctp_tmit_chunk *chk;
1584 /* EY - for nr_sack */
1588 int need_reasm_check = 0;
1589 uint16_t strmno, strmseq;
1591 struct sctp_queued_to_read *control;
1593 uint32_t protocol_id;
1594 uint8_t chunk_flags;
1595 struct sctp_stream_reset_list *liste;
1598 tsn = ntohl(ch->dp.tsn);
1599 chunk_flags = ch->ch.chunk_flags;
1600 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1601 asoc->send_sack = 1;
1603 protocol_id = ch->dp.protocol_id;
1604 ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1605 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1606 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1611 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1612 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1613 asoc->cumulative_tsn == tsn) {
1614 /* It is a duplicate */
1615 SCTP_STAT_INCR(sctps_recvdupdata);
1616 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1617 /* Record a dup for the next outbound sack */
1618 asoc->dup_tsns[asoc->numduptsns] = tsn;
1621 asoc->send_sack = 1;
1624 /* Calculate the number of TSN's between the base and this TSN */
1625 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1626 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1627 /* Can't hold the bit in the mapping at max array, toss it */
1630 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1631 SCTP_TCB_LOCK_ASSERT(stcb);
1632 if (sctp_expand_mapping_array(asoc, gap)) {
1633 /* Can't expand, drop it */
1637 /* EY - for nr_sack */
1640 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1643 /* See if we have received this one already */
1644 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1645 SCTP_STAT_INCR(sctps_recvdupdata);
1646 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1647 /* Record a dup for the next outbound sack */
1648 asoc->dup_tsns[asoc->numduptsns] = tsn;
1651 asoc->send_sack = 1;
1655 * Check to see about the GONE flag, duplicates would cause a sack
1656 * to be sent up above
1658 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1659 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1660 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1663 * wait a minute, this guy is gone, there is no longer a
1664 * receiver. Send peer an ABORT!
1666 struct mbuf *op_err;
1668 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1669 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1674 * Now before going further we see if there is room. If NOT then we
1675 * MAY let one through only IF this TSN is the one we are waiting
1676 * for on a partial delivery API.
1679 /* now do the tests */
1680 if (((asoc->cnt_on_all_streams +
1681 asoc->cnt_on_reasm_queue +
1682 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1683 (((int)asoc->my_rwnd) <= 0)) {
1685 * When we have NO room in the rwnd we check to make sure
1686 * the reader is doing its job...
1688 if (stcb->sctp_socket->so_rcv.sb_cc) {
1689 /* some to read, wake-up */
1690 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1693 so = SCTP_INP_SO(stcb->sctp_ep);
1694 atomic_add_int(&stcb->asoc.refcnt, 1);
1695 SCTP_TCB_UNLOCK(stcb);
1696 SCTP_SOCKET_LOCK(so, 1);
1697 SCTP_TCB_LOCK(stcb);
1698 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1699 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1700 /* assoc was freed while we were unlocked */
1701 SCTP_SOCKET_UNLOCK(so, 1);
1705 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1706 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1707 SCTP_SOCKET_UNLOCK(so, 1);
1710 /* now is it in the mapping array of what we have accepted? */
1711 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1712 /* Nope not in the valid range dump it */
1713 sctp_set_rwnd(stcb, asoc);
1714 if ((asoc->cnt_on_all_streams +
1715 asoc->cnt_on_reasm_queue +
1716 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1717 SCTP_STAT_INCR(sctps_datadropchklmt);
1719 SCTP_STAT_INCR(sctps_datadroprwnd);
1726 strmno = ntohs(ch->dp.stream_id);
1727 if (strmno >= asoc->streamincnt) {
1728 struct sctp_paramhdr *phdr;
1731 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1732 0, M_DONTWAIT, 1, MT_DATA);
1734 /* add some space up front so prepend will work well */
1735 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1736 phdr = mtod(mb, struct sctp_paramhdr *);
1738 * Error causes are just param's and this one has
1739 * two back to back phdr, one with the error type
1740 * and size, the other with the streamid and a rsvd
1742 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1743 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1744 phdr->param_length =
1745 htons(sizeof(struct sctp_paramhdr) * 2);
1747 /* We insert the stream in the type field */
1748 phdr->param_type = ch->dp.stream_id;
1749 /* And set the length to 0 for the rsvd field */
1750 phdr->param_length = 0;
1751 sctp_queue_op_err(stcb, mb);
1753 SCTP_STAT_INCR(sctps_badsid);
1754 SCTP_TCB_LOCK_ASSERT(stcb);
1755 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1756 /* EY set this tsn present in nr_sack's nr_mapping_array */
1757 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1758 SCTP_TCB_LOCK_ASSERT(stcb);
1759 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1760 SCTP_REVERSE_OUT_TSN_PRES(gap, tsn, asoc);
1762 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1763 /* we have a new high score */
1764 asoc->highest_tsn_inside_map = tsn;
1765 /* EY nr_sack version of the above */
1766 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
1767 asoc->highest_tsn_inside_nr_map = tsn;
1768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1769 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1772 if (tsn == (asoc->cumulative_tsn + 1)) {
1773 /* Update cum-ack */
1774 asoc->cumulative_tsn = tsn;
1779 * Before we continue lets validate that we are not being fooled by
1780 * an evil attacker. We can only have 4k chunks based on our TSN
1781 * spread allowed by the mapping array 512 * 8 bits, so there is no
1782 * way our stream sequence numbers could have wrapped. We of course
1783 * only validate the FIRST fragment so the bit must be set.
1785 strmseq = ntohs(ch->dp.stream_sequence);
1786 #ifdef SCTP_ASOCLOG_OF_TSNS
1787 SCTP_TCB_LOCK_ASSERT(stcb);
1788 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1789 asoc->tsn_in_at = 0;
1790 asoc->tsn_in_wrapped = 1;
1792 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1793 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1794 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1795 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1796 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1797 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1798 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1799 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1802 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1803 (TAILQ_EMPTY(&asoc->resetHead)) &&
1804 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1805 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1806 strmseq, MAX_SEQ) ||
1807 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1808 /* The incoming sseq is behind where we last delivered? */
1809 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1810 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1811 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1812 0, M_DONTWAIT, 1, MT_DATA);
1814 struct sctp_paramhdr *ph;
1817 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1818 (3 * sizeof(uint32_t));
1819 ph = mtod(oper, struct sctp_paramhdr *);
1820 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1821 ph->param_length = htons(SCTP_BUF_LEN(oper));
1822 ippp = (uint32_t *) (ph + 1);
1823 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1827 *ippp = ((strmno << 16) | strmseq);
1830 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1831 sctp_abort_an_association(stcb->sctp_ep, stcb,
1832 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1836 /************************************
1837 * From here down we may find ch-> invalid
1838 * so its a good idea NOT to use it.
1839 *************************************/
1841 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1842 if (last_chunk == 0) {
1843 dmbuf = SCTP_M_COPYM(*m,
1844 (offset + sizeof(struct sctp_data_chunk)),
1845 the_len, M_DONTWAIT);
1846 #ifdef SCTP_MBUF_LOGGING
1847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1852 if (SCTP_BUF_IS_EXTENDED(mat)) {
1853 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1855 mat = SCTP_BUF_NEXT(mat);
1860 /* We can steal the last chunk */
1864 /* lop off the top part */
1865 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1866 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1867 l_len = SCTP_BUF_LEN(dmbuf);
1870 * need to count up the size hopefully does not hit
1878 l_len += SCTP_BUF_LEN(lat);
1879 lat = SCTP_BUF_NEXT(lat);
1882 if (l_len > the_len) {
1883 /* Trim the end round bytes off too */
1884 m_adj(dmbuf, -(l_len - the_len));
1887 if (dmbuf == NULL) {
1888 SCTP_STAT_INCR(sctps_nomem);
1891 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1892 asoc->fragmented_delivery_inprogress == 0 &&
1893 TAILQ_EMPTY(&asoc->resetHead) &&
1895 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1896 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1897 /* Candidate for express delivery */
1899 * Its not fragmented, No PD-API is up, Nothing in the
1900 * delivery queue, Its un-ordered OR ordered and the next to
1901 * deliver AND nothing else is stuck on the stream queue,
1902 * And there is room for it in the socket buffer. Lets just
1903 * stuff it up the buffer....
1906 /* It would be nice to avoid this copy if we could :< */
1907 sctp_alloc_a_readq(stcb, control);
1908 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1914 if (control == NULL) {
1915 goto failed_express_del;
1917 sctp_add_to_readq(stcb->sctp_ep, stcb,
1918 control, &stcb->sctp_socket->so_rcv,
1919 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1922 * EY here I should check if this delivered tsn is
1923 * out_of_order, if yes then update the nr_map
1925 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1927 * EY check if the mapping_array and nr_mapping
1928 * array are consistent
1930 if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
1933 * sctp_process_a_data_chunk(5): Something
1934 * is wrong the map base tsn" "\nEY-and
1935 * nr_map base tsn should be equal.");
1937 /* EY debugging block */
1940 * printf("\nEY-Calculating an
1941 * nr_gap!!\nmapping_array_size = %d
1942 * nr_mapping_array_size = %d"
1943 * "\nEY-mapping_array_base = %d
1944 * nr_mapping_array_base =
1945 * %d\nEY-highest_tsn_inside_map = %d"
1946 * "highest_tsn_inside_nr_map = %d\nEY-TSN =
1947 * %d nr_gap = %d",asoc->mapping_array_size,
1948 * asoc->nr_mapping_array_size,
1949 * asoc->mapping_array_base_tsn,
1950 * asoc->nr_mapping_array_base_tsn,
1951 * asoc->highest_tsn_inside_map,
1952 * asoc->highest_tsn_inside_nr_map,tsn,nr_gap
1956 /* EY - not %100 sure about the lock thing */
1957 SCTP_TCB_LOCK_ASSERT(stcb);
1958 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
1959 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
1960 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
1961 asoc->highest_tsn_inside_nr_map = tsn;
1963 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1964 /* for ordered, bump what we delivered */
1965 asoc->strmin[strmno].last_sequence_delivered++;
1967 SCTP_STAT_INCR(sctps_recvexpress);
1968 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1969 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1970 SCTP_STR_LOG_FROM_EXPRS_DEL);
1973 goto finish_express_del;
1976 /* If we reach here this is a new chunk */
1979 /* Express for fragmented delivery? */
1980 if ((asoc->fragmented_delivery_inprogress) &&
1981 (stcb->asoc.control_pdapi) &&
1982 (asoc->str_of_pdapi == strmno) &&
1983 (asoc->ssn_of_pdapi == strmseq)
1985 control = stcb->asoc.control_pdapi;
1986 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1987 /* Can't be another first? */
1988 goto failed_pdapi_express_del;
1990 if (tsn == (control->sinfo_tsn + 1)) {
1991 /* Yep, we can add it on */
1995 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1998 cumack = asoc->cumulative_tsn;
1999 if ((cumack + 1) == tsn)
2002 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
2004 &stcb->sctp_socket->so_rcv)) {
2005 SCTP_PRINTF("Append fails end:%d\n", end);
2006 goto failed_pdapi_express_del;
2009 * EY It is appended to the read queue in prev if
2010 * block here I should check if this delivered tsn
2011 * is out_of_order, if yes then update the nr_map
2013 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2014 /* EY debugging block */
2017 * printf("\nEY-Calculating an
2018 * nr_gap!!\nEY-mapping_array_size =
2019 * %d nr_mapping_array_size = %d"
2020 * "\nEY-mapping_array_base = %d
2021 * nr_mapping_array_base =
2022 * %d\nEY-highest_tsn_inside_map =
2023 * %d" "highest_tsn_inside_nr_map =
2024 * %d\nEY-TSN = %d nr_gap =
2025 * %d",asoc->mapping_array_size,
2026 * asoc->nr_mapping_array_size,
2027 * asoc->mapping_array_base_tsn,
2028 * asoc->nr_mapping_array_base_tsn,
2029 * asoc->highest_tsn_inside_map,
2030 * asoc->highest_tsn_inside_nr_map,ts
2034 /* EY - not %100 sure about the lock thing */
2035 SCTP_TCB_LOCK_ASSERT(stcb);
2036 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2037 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2038 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2039 asoc->highest_tsn_inside_nr_map = tsn;
2041 SCTP_STAT_INCR(sctps_recvexpressm);
2042 control->sinfo_tsn = tsn;
2043 asoc->tsn_last_delivered = tsn;
2044 asoc->fragment_flags = chunk_flags;
2045 asoc->tsn_of_pdapi_last_delivered = tsn;
2046 asoc->last_flags_delivered = chunk_flags;
2047 asoc->last_strm_seq_delivered = strmseq;
2048 asoc->last_strm_no_delivered = strmno;
2050 /* clean up the flags and such */
2051 asoc->fragmented_delivery_inprogress = 0;
2052 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2053 asoc->strmin[strmno].last_sequence_delivered++;
2055 stcb->asoc.control_pdapi = NULL;
2056 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
2058 * There could be another message
2061 need_reasm_check = 1;
2065 goto finish_express_del;
2068 failed_pdapi_express_del:
2070 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2071 sctp_alloc_a_chunk(stcb, chk);
2073 /* No memory so we drop the chunk */
2074 SCTP_STAT_INCR(sctps_nomem);
2075 if (last_chunk == 0) {
2076 /* we copied it, free the copy */
2077 sctp_m_freem(dmbuf);
2081 chk->rec.data.TSN_seq = tsn;
2082 chk->no_fr_allowed = 0;
2083 chk->rec.data.stream_seq = strmseq;
2084 chk->rec.data.stream_number = strmno;
2085 chk->rec.data.payloadtype = protocol_id;
2086 chk->rec.data.context = stcb->asoc.context;
2087 chk->rec.data.doing_fast_retransmit = 0;
2088 chk->rec.data.rcv_flags = chunk_flags;
2090 chk->send_size = the_len;
2092 atomic_add_int(&net->ref_count, 1);
2095 sctp_alloc_a_readq(stcb, control);
2096 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2102 if (control == NULL) {
2103 /* No memory so we drop the chunk */
2104 SCTP_STAT_INCR(sctps_nomem);
2105 if (last_chunk == 0) {
2106 /* we copied it, free the copy */
2107 sctp_m_freem(dmbuf);
2111 control->length = the_len;
2114 /* Mark it as received */
2115 /* Now queue it where it belongs */
2116 if (control != NULL) {
2117 /* First a sanity check */
2118 if (asoc->fragmented_delivery_inprogress) {
2120 * Ok, we have a fragmented delivery in progress if
2121 * this chunk is next to deliver OR belongs in our
2122 * view to the reassembly, the peer is evil or
2125 uint32_t estimate_tsn;
2127 estimate_tsn = asoc->tsn_last_delivered + 1;
2128 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2129 (estimate_tsn == control->sinfo_tsn)) {
2130 /* Evil/Broke peer */
2131 sctp_m_freem(control->data);
2132 control->data = NULL;
2133 if (control->whoFrom) {
2134 sctp_free_remote_addr(control->whoFrom);
2135 control->whoFrom = NULL;
2137 sctp_free_a_readq(stcb, control);
2138 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2139 0, M_DONTWAIT, 1, MT_DATA);
2141 struct sctp_paramhdr *ph;
2144 SCTP_BUF_LEN(oper) =
2145 sizeof(struct sctp_paramhdr) +
2146 (3 * sizeof(uint32_t));
2147 ph = mtod(oper, struct sctp_paramhdr *);
2149 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2150 ph->param_length = htons(SCTP_BUF_LEN(oper));
2151 ippp = (uint32_t *) (ph + 1);
2152 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
2156 *ippp = ((strmno << 16) | strmseq);
2158 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
2159 sctp_abort_an_association(stcb->sctp_ep, stcb,
2160 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2165 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2166 sctp_m_freem(control->data);
2167 control->data = NULL;
2168 if (control->whoFrom) {
2169 sctp_free_remote_addr(control->whoFrom);
2170 control->whoFrom = NULL;
2172 sctp_free_a_readq(stcb, control);
2174 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2175 0, M_DONTWAIT, 1, MT_DATA);
2177 struct sctp_paramhdr *ph;
2180 SCTP_BUF_LEN(oper) =
2181 sizeof(struct sctp_paramhdr) +
2182 (3 * sizeof(uint32_t));
2184 struct sctp_paramhdr *);
2186 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2188 htons(SCTP_BUF_LEN(oper));
2189 ippp = (uint32_t *) (ph + 1);
2190 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2194 *ippp = ((strmno << 16) | strmseq);
2196 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2197 sctp_abort_an_association(stcb->sctp_ep,
2198 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2205 /* No PDAPI running */
2206 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2208 * Reassembly queue is NOT empty validate
2209 * that this tsn does not need to be in
2210 * reasembly queue. If it does then our peer
2211 * is broken or evil.
2213 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2214 sctp_m_freem(control->data);
2215 control->data = NULL;
2216 if (control->whoFrom) {
2217 sctp_free_remote_addr(control->whoFrom);
2218 control->whoFrom = NULL;
2220 sctp_free_a_readq(stcb, control);
2221 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2222 0, M_DONTWAIT, 1, MT_DATA);
2224 struct sctp_paramhdr *ph;
2227 SCTP_BUF_LEN(oper) =
2228 sizeof(struct sctp_paramhdr) +
2229 (3 * sizeof(uint32_t));
2231 struct sctp_paramhdr *);
2233 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2235 htons(SCTP_BUF_LEN(oper));
2236 ippp = (uint32_t *) (ph + 1);
2237 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2241 *ippp = ((strmno << 16) | strmseq);
2243 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2244 sctp_abort_an_association(stcb->sctp_ep,
2245 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2252 /* ok, if we reach here we have passed the sanity checks */
2253 if (chunk_flags & SCTP_DATA_UNORDERED) {
2254 /* queue directly into socket buffer */
2255 sctp_add_to_readq(stcb->sctp_ep, stcb,
2257 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2260 * EY It is added to the read queue in prev if block
2261 * here I should check if this delivered tsn is
2262 * out_of_order, if yes then update the nr_map
2264 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2266 * EY check if the mapping_array and
2267 * nr_mapping array are consistent
2269 if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
2272 * sctp_process_a_data_chunk(6):
2273 * Something is wrong the map base
2274 * tsn" "\nEY-and nr_map base tsn
2275 * should be equal.");
2278 * EY - not %100 sure about the lock
2279 * thing, i think we don't need the
2282 /* SCTP_TCB_LOCK_ASSERT(stcb); */
2285 * printf("\nEY-Calculating an
2286 * nr_gap!!\nEY-mapping_array_size =
2287 * %d nr_mapping_array_size = %d"
2288 * "\nEY-mapping_array_base = %d
2289 * nr_mapping_array_base =
2290 * %d\nEY-highest_tsn_inside_map =
2291 * %d" "highest_tsn_inside_nr_map =
2292 * %d\nEY-TSN = %d nr_gap =
2293 * %d",asoc->mapping_array_size,
2294 * asoc->nr_mapping_array_size,
2295 * asoc->mapping_array_base_tsn,
2296 * asoc->nr_mapping_array_base_tsn,
2297 * asoc->highest_tsn_inside_map,
2298 * asoc->highest_tsn_inside_nr_map,ts
2302 SCTP_TCB_LOCK_ASSERT(stcb);
2303 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2304 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2305 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2306 asoc->highest_tsn_inside_nr_map = tsn;
2310 * Special check for when streams are resetting. We
2311 * could be more smart about this and check the
2312 * actual stream to see if it is not being reset..
2313 * that way we would not create a HOLB when amongst
2314 * streams being reset and those not being reset.
2316 * We take complete messages that have a stream reset
2317 * intervening (aka the TSN is after where our
2318 * cum-ack needs to be) off and put them on a
2319 * pending_reply_queue. The reassembly ones we do
2320 * not have to worry about since they are all sorted
2321 * and proceessed by TSN order. It is only the
2322 * singletons I must worry about.
2324 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2325 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2328 * yep its past where we need to reset... go
2329 * ahead and queue it.
2331 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2333 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2335 struct sctp_queued_to_read *ctlOn;
2336 unsigned char inserted = 0;
2338 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2340 if (compare_with_wrap(control->sinfo_tsn,
2341 ctlOn->sinfo_tsn, MAX_TSN)) {
2342 ctlOn = TAILQ_NEXT(ctlOn, next);
2345 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2350 if (inserted == 0) {
2352 * must be put at end, use
2353 * prevP (all setup from
2354 * loop) to setup nextP.
2356 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2360 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2367 /* Into the re-assembly queue */
2368 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2371 * the assoc is now gone and chk was put onto the
2372 * reasm queue, which has all been freed.
2379 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2380 /* we have a new high score */
2381 asoc->highest_tsn_inside_map = tsn;
2382 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2383 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2386 if (tsn == (asoc->cumulative_tsn + 1)) {
2387 /* Update cum-ack */
2388 asoc->cumulative_tsn = tsn;
2394 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2396 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2398 SCTP_STAT_INCR(sctps_recvdata);
2399 /* Set it present please */
2400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2401 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2403 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2404 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2405 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2407 SCTP_TCB_LOCK_ASSERT(stcb);
2408 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2410 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
2411 asoc->peer_supports_nr_sack &&
2412 (SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) {
2413 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2414 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2415 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2416 asoc->highest_tsn_inside_nr_map = tsn;
2419 /* check the special flag for stream resets */
2420 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2421 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2422 (asoc->cumulative_tsn == liste->tsn))
2425 * we have finished working through the backlogged TSN's now
2426 * time to reset streams. 1: call reset function. 2: free
2427 * pending_reply space 3: distribute any chunks in
2428 * pending_reply_queue.
2430 struct sctp_queued_to_read *ctl;
2432 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2433 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2434 SCTP_FREE(liste, SCTP_M_STRESET);
2435 /* sa_ignore FREED_MEMORY */
2436 liste = TAILQ_FIRST(&asoc->resetHead);
2437 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2438 if (ctl && (liste == NULL)) {
2439 /* All can be removed */
2441 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2442 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2446 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2449 /* more than one in queue */
2450 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2452 * if ctl->sinfo_tsn is <= liste->tsn we can
2453 * process it which is the NOT of
2454 * ctl->sinfo_tsn > liste->tsn
2456 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2457 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2461 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2465 * Now service re-assembly to pick up anything that has been
2466 * held on reassembly queue?
2468 sctp_deliver_reasm_check(stcb, asoc);
2469 need_reasm_check = 0;
2471 if (need_reasm_check) {
2472 /* Another one waits ? */
2473 sctp_deliver_reasm_check(stcb, asoc);
/*
 * Lookup table used when advancing the cumulative-TSN point through the
 * receive mapping array.  For a mapping-array byte b,
 * sctp_map_lookup_tab[b] is (number of consecutive 1-bits starting at the
 * least-significant bit) - 1, i.e. the bit index of the last TSN in the
 * run of "present" TSNs anchored at bit 0 of that byte.  A value of -1
 * means bit 0 is clear (the cum-ack cannot advance into this byte at
 * all); 0xff maps to 7 (all eight TSNs covered by the byte are present).
 *
 * Fix: the original excerpt was missing the terminating "};" after the
 * 256th entry, leaving the definition syntactically unterminated.
 */
int8_t sctp_map_lookup_tab[256] = {
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 6,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 7,
};
2515 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2518 * Now we also need to check the mapping array in a couple of ways.
2519 * 1) Did we move the cum-ack point?
2521 struct sctp_association *asoc;
2523 int last_all_ones = 0;
2524 int slide_from, slide_end, lgap, distance;
2526 /* EY nr_mapping array variables */
2528 /* int nr_last_all_ones = 0; */
2529 /* int nr_slide_from, nr_slide_end, nr_lgap, nr_distance; */
2531 uint32_t old_cumack, old_base, old_highest;
2532 unsigned char aux_array[64];
2535 * EY! Don't think this is required but I am immitating the code for
2536 * map just to make sure
2538 unsigned char nr_aux_array[64];
2543 old_cumack = asoc->cumulative_tsn;
2544 old_base = asoc->mapping_array_base_tsn;
2545 old_highest = asoc->highest_tsn_inside_map;
2546 if (asoc->mapping_array_size < 64)
2547 memcpy(aux_array, asoc->mapping_array,
2548 asoc->mapping_array_size);
2550 memcpy(aux_array, asoc->mapping_array, 64);
2551 /* EY do the same for nr_mapping_array */
2552 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2554 if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
2556 * printf("\nEY-IN sack_check method: \nEY-" "The
2557 * size of map and nr_map are inconsitent")
2560 if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) {
2562 * printf("\nEY-IN sack_check method VERY CRUCIAL
2563 * error: \nEY-" "The base tsns of map and nr_map
2567 /* EY! just immitating the above code */
2568 if (asoc->nr_mapping_array_size < 64)
2569 memcpy(nr_aux_array, asoc->nr_mapping_array,
2570 asoc->nr_mapping_array_size);
2572 memcpy(aux_array, asoc->nr_mapping_array, 64);
2575 * We could probably improve this a small bit by calculating the
2576 * offset of the current cum-ack as the starting point.
2579 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2581 if (asoc->mapping_array[slide_from] == 0xff) {
2585 /* there is a 0 bit */
2586 at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
2591 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2592 /* at is one off, since in the table a embedded -1 is present */
2595 if (compare_with_wrap(asoc->cumulative_tsn,
2596 asoc->highest_tsn_inside_map,
2599 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2600 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2602 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2603 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2604 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2605 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2607 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2608 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2611 if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2612 /* The complete array was completed by a single FR */
2613 /* higest becomes the cum-ack */
2616 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2617 /* clear the array */
2618 clr = (at >> 3) + 1;
2619 if (clr > asoc->mapping_array_size) {
2620 clr = asoc->mapping_array_size;
2622 memset(asoc->mapping_array, 0, clr);
2623 /* base becomes one ahead of the cum-ack */
2624 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2626 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2628 if (clr > asoc->nr_mapping_array_size)
2629 clr = asoc->nr_mapping_array_size;
2631 memset(asoc->nr_mapping_array, 0, clr);
2632 /* base becomes one ahead of the cum-ack */
2633 asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2634 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2636 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2637 sctp_log_map(old_base, old_cumack, old_highest,
2638 SCTP_MAP_PREPARE_SLIDE);
2639 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2640 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2642 } else if (at >= 8) {
2643 /* we can slide the mapping array down */
2644 /* slide_from holds where we hit the first NON 0xff byte */
2647 * now calculate the ceiling of the move using our highest
2650 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2651 lgap = asoc->highest_tsn_inside_map -
2652 asoc->mapping_array_base_tsn;
2654 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2655 asoc->highest_tsn_inside_map + 1;
2657 slide_end = lgap >> 3;
2658 if (slide_end < slide_from) {
2660 panic("impossible slide");
2662 printf("impossible slide?\n");
2666 if (slide_end > asoc->mapping_array_size) {
2668 panic("would overrun buffer");
2670 printf("Gak, would have overrun map end:%d slide_end:%d\n",
2671 asoc->mapping_array_size, slide_end);
2672 slide_end = asoc->mapping_array_size;
2675 distance = (slide_end - slide_from) + 1;
2676 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2677 sctp_log_map(old_base, old_cumack, old_highest,
2678 SCTP_MAP_PREPARE_SLIDE);
2679 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2680 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2682 if (distance + slide_from > asoc->mapping_array_size ||
2685 * Here we do NOT slide forward the array so that
2686 * hopefully when more data comes in to fill it up
2687 * we will be able to slide it forward. Really I
2688 * don't think this should happen :-0
2691 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2692 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2693 (uint32_t) asoc->mapping_array_size,
2694 SCTP_MAP_SLIDE_NONE);
2699 for (ii = 0; ii < distance; ii++) {
2700 asoc->mapping_array[ii] =
2701 asoc->mapping_array[slide_from + ii];
2703 for (ii = distance; ii <= slide_end; ii++) {
2704 asoc->mapping_array[ii] = 0;
2706 asoc->mapping_array_base_tsn += (slide_from << 3);
2707 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2708 sctp_log_map(asoc->mapping_array_base_tsn,
2709 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2710 SCTP_MAP_SLIDE_RESULT);
2713 * EY if doing nr_sacks then slide the
2714 * nr_mapping_array accordingly please
2716 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2717 for (ii = 0; ii < distance; ii++) {
2718 asoc->nr_mapping_array[ii] =
2719 asoc->nr_mapping_array[slide_from + ii];
2721 for (ii = distance; ii <= slide_end; ii++) {
2722 asoc->nr_mapping_array[ii] = 0;
2724 asoc->nr_mapping_array_base_tsn += (slide_from << 3);
2729 * Now we need to see if we need to queue a sack or just start the
2730 * timer (if allowed).
2733 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2735 * Ok special case, in SHUTDOWN-SENT case. here we
2736 * maker sure SACK timer is off and instead send a
2737 * SHUTDOWN and a SACK
2739 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2740 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2741 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2743 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2745 * EY if nr_sacks used then send an nr-sack , a sack
2748 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2749 sctp_send_nr_sack(stcb);
2751 sctp_send_sack(stcb);
2755 /* is there a gap now ? */
2756 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2757 stcb->asoc.cumulative_tsn, MAX_TSN);
2760 * CMT DAC algorithm: increase number of packets
2761 * received since last ack
2763 stcb->asoc.cmt_dac_pkts_rcvd++;
2765 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2767 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2769 (stcb->asoc.numduptsns) || /* we have dup's */
2770 (is_a_gap) || /* is still a gap */
2771 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2772 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2775 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2776 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2777 (stcb->asoc.send_sack == 0) &&
2778 (stcb->asoc.numduptsns == 0) &&
2779 (stcb->asoc.delayed_ack) &&
2780 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2783 * CMT DAC algorithm: With CMT,
2784 * delay acks even in the face of
2786 * reordering. Therefore, if acks that
2787 * do not have to be sent because of
2788 * the above reasons, will be
2789 * delayed. That is, acks that would
2790 * have been sent due to gap reports
2791 * will be delayed with DAC. Start
2792 * the delayed ack timer.
2794 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2795 stcb->sctp_ep, stcb, NULL);
2798 * Ok we must build a SACK since the
2799 * timer is pending, we got our
2800 * first packet OR there are gaps or
2803 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2805 * EY if nr_sacks used then send an
2806 * nr-sack , a sack otherwise
2808 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2809 sctp_send_nr_sack(stcb);
2811 sctp_send_sack(stcb);
2814 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2815 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2816 stcb->sctp_ep, stcb, NULL);
2824 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2826 struct sctp_tmit_chunk *chk;
2830 if (asoc->fragmented_delivery_inprogress) {
2831 sctp_service_reassembly(stcb, asoc);
2833 /* Can we proceed further, i.e. the PD-API is complete */
2834 if (asoc->fragmented_delivery_inprogress) {
2839 * Now is there some other chunk I can deliver from the reassembly
2843 chk = TAILQ_FIRST(&asoc->reasmqueue);
2845 asoc->size_on_reasm_queue = 0;
2846 asoc->cnt_on_reasm_queue = 0;
2849 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2850 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2851 ((nxt_todel == chk->rec.data.stream_seq) ||
2852 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2854 * Yep the first one is here. We setup to start reception,
2855 * by backing down the TSN just in case we can't deliver.
2859 * Before we start though either all of the message should
2860 * be here or 1/4 the socket buffer max or nothing on the
2861 * delivery queue and something can be delivered.
2863 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2864 (tsize >= stcb->sctp_ep->partial_delivery_point))) {
2865 asoc->fragmented_delivery_inprogress = 1;
2866 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2867 asoc->str_of_pdapi = chk->rec.data.stream_number;
2868 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2869 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2870 asoc->fragment_flags = chk->rec.data.rcv_flags;
2871 sctp_service_reassembly(stcb, asoc);
2872 if (asoc->fragmented_delivery_inprogress == 0) {
2880 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2881 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2882 struct sctp_nets *net, uint32_t * high_tsn)
2884 struct sctp_data_chunk *ch, chunk_buf;
2885 struct sctp_association *asoc;
2886 int num_chunks = 0; /* number of control chunks processed */
2888 int chk_length, break_flag, last_chunk;
2889 int abort_flag = 0, was_a_gap = 0;
2893 sctp_set_rwnd(stcb, &stcb->asoc);
2896 SCTP_TCB_LOCK_ASSERT(stcb);
2898 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2899 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2900 /* there was a gap before this data was processed */
2904 * setup where we got the last DATA packet from for any SACK that
2905 * may need to go out. Don't bump the net. This is done ONLY when a
2906 * chunk is assigned.
2908 asoc->last_data_chunk_from = net;
2911 * Now before we proceed we must figure out if this is a wasted
2912 * cluster... i.e. it is a small packet sent in and yet the driver
2913 * underneath allocated a full cluster for it. If so we must copy it
2914 * to a smaller mbuf and free up the cluster mbuf. This will help
2915 * with cluster starvation. Note for __Panda__ we don't do this
2916 * since it has clusters all the way down to 64 bytes.
2918 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2919 /* we only handle mbufs that are singletons.. not chains */
2920 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2922 /* ok lets see if we can copy the data up */
2925 /* get the pointers and copy */
2926 to = mtod(m, caddr_t *);
2927 from = mtod((*mm), caddr_t *);
2928 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2929 /* copy the length and free up the old */
2930 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2932 /* sucess, back copy */
2935 /* We are in trouble in the mbuf world .. yikes */
2939 /* get pointer to the first chunk header */
2940 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2941 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2946 * process all DATA chunks...
2948 *high_tsn = asoc->cumulative_tsn;
2950 asoc->data_pkts_seen++;
2951 while (stop_proc == 0) {
2952 /* validate chunk length */
2953 chk_length = ntohs(ch->ch.chunk_length);
2954 if (length - *offset < chk_length) {
2955 /* all done, mutulated chunk */
2959 if (ch->ch.chunk_type == SCTP_DATA) {
2960 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2962 * Need to send an abort since we had a
2963 * invalid data chunk.
2965 struct mbuf *op_err;
2967 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2968 0, M_DONTWAIT, 1, MT_DATA);
2971 struct sctp_paramhdr *ph;
2974 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2975 (2 * sizeof(uint32_t));
2976 ph = mtod(op_err, struct sctp_paramhdr *);
2978 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2979 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2980 ippp = (uint32_t *) (ph + 1);
2981 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2983 *ippp = asoc->cumulative_tsn;
2986 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2987 sctp_abort_association(inp, stcb, m, iphlen, sh,
2988 op_err, 0, net->port);
2991 #ifdef SCTP_AUDITING_ENABLED
2992 sctp_audit_log(0xB1, 0);
2994 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2999 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
3000 chk_length, net, high_tsn, &abort_flag, &break_flag,
3009 * Set because of out of rwnd space and no
3010 * drop rep space left.
3016 /* not a data chunk in the data region */
3017 switch (ch->ch.chunk_type) {
3018 case SCTP_INITIATION:
3019 case SCTP_INITIATION_ACK:
3020 case SCTP_SELECTIVE_ACK:
3021 case SCTP_NR_SELECTIVE_ACK: /* EY */
3022 case SCTP_HEARTBEAT_REQUEST:
3023 case SCTP_HEARTBEAT_ACK:
3024 case SCTP_ABORT_ASSOCIATION:
3026 case SCTP_SHUTDOWN_ACK:
3027 case SCTP_OPERATION_ERROR:
3028 case SCTP_COOKIE_ECHO:
3029 case SCTP_COOKIE_ACK:
3032 case SCTP_SHUTDOWN_COMPLETE:
3033 case SCTP_AUTHENTICATION:
3034 case SCTP_ASCONF_ACK:
3035 case SCTP_PACKET_DROPPED:
3036 case SCTP_STREAM_RESET:
3037 case SCTP_FORWARD_CUM_TSN:
3040 * Now, what do we do with KNOWN chunks that
3041 * are NOT in the right place?
3043 * For now, I do nothing but ignore them. We
3044 * may later want to add sysctl stuff to
3045 * switch out and do either an ABORT() or
3046 * possibly process them.
3048 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
3049 struct mbuf *op_err;
3051 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
3052 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
3057 /* unknown chunk type, use bit rules */
3058 if (ch->ch.chunk_type & 0x40) {
3059 /* Add a error report to the queue */
3061 struct sctp_paramhdr *phd;
3063 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
3065 phd = mtod(merr, struct sctp_paramhdr *);
3067 * We cheat and use param
3068 * type since we did not
3069 * bother to define a error
3070 * cause struct. They are
3071 * the same basic format
3072 * with different names.
3075 htons(SCTP_CAUSE_UNRECOG_CHUNK);
3077 htons(chk_length + sizeof(*phd));
3078 SCTP_BUF_LEN(merr) = sizeof(*phd);
3079 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
3080 SCTP_SIZE32(chk_length),
3082 if (SCTP_BUF_NEXT(merr)) {
3083 sctp_queue_op_err(stcb, merr);
3089 if ((ch->ch.chunk_type & 0x80) == 0) {
3090 /* discard the rest of this packet */
3092 } /* else skip this bad chunk and
3095 }; /* switch of chunk type */
3097 *offset += SCTP_SIZE32(chk_length);
3098 if ((*offset >= length) || stop_proc) {
3099 /* no more data left in the mbuf chain */
3103 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
3104 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3114 * we need to report rwnd overrun drops.
3116 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
3120 * Did we get data, if so update the time for auto-close and
3121 * give peer credit for being alive.
3123 SCTP_STAT_INCR(sctps_recvpktwithdata);
3124 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3125 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3126 stcb->asoc.overall_error_count,
3128 SCTP_FROM_SCTP_INDATA,
3131 stcb->asoc.overall_error_count = 0;
3132 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3134 /* now service all of the reassm queue if needed */
3135 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
3136 sctp_service_queues(stcb, asoc);
3138 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
3139 /* Assure that we ack right away */
3140 stcb->asoc.send_sack = 1;
3142 /* Start a sack timer or QUEUE a SACK for sending */
3143 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
3144 (stcb->asoc.mapping_array[0] != 0xff)) {
3145 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
3146 (stcb->asoc.delayed_ack == 0) ||
3147 (stcb->asoc.numduptsns) ||
3148 (stcb->asoc.send_sack == 1)) {
3149 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3150 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
3153 * EY if nr_sacks used then send an nr-sack , a sack
3156 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
3157 sctp_send_nr_sack(stcb);
3159 sctp_send_sack(stcb);
3161 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3162 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
3163 stcb->sctp_ep, stcb, NULL);
3167 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
3176 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3177 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3178 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3179 int num_seg, int *ecn_seg_sums)
3181 /************************************************/
3182 /* process fragments and update sendqueue */
3183 /************************************************/
3184 struct sctp_sack *sack;
3185 struct sctp_gap_ack_block *frag, block;
3186 struct sctp_tmit_chunk *tp1;
3188 unsigned int theTSN;
3191 uint16_t frag_strt, frag_end, primary_flag_set;
3192 u_long last_frag_high;
3195 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
3197 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3198 primary_flag_set = 1;
3200 primary_flag_set = 0;
3204 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3205 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3206 *offset += sizeof(block);
3212 for (i = 0; i < num_seg; i++) {
3213 frag_strt = ntohs(frag->start);
3214 frag_end = ntohs(frag->end);
3215 /* some sanity checks on the fragment offsets */
3216 if (frag_strt > frag_end) {
3217 /* this one is malformed, skip */
3221 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3223 *biggest_tsn_acked = frag_end + last_tsn;
3225 /* mark acked dgs and find out the highestTSN being acked */
3227 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3229 /* save the locations of the last frags */
3230 last_frag_high = frag_end + last_tsn;
3233 * now lets see if we need to reset the queue due to
3234 * a out-of-order SACK fragment
3236 if (compare_with_wrap(frag_strt + last_tsn,
3237 last_frag_high, MAX_TSN)) {
3239 * if the new frag starts after the last TSN
3240 * frag covered, we are ok and this one is
3241 * beyond the last one
3246 * ok, they have reset us, so we need to
3247 * reset the queue this will cause extra
3248 * hunting but hey, they chose the
3249 * performance hit when they failed to order
3252 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3254 last_frag_high = frag_end + last_tsn;
3256 for (j = frag_strt; j <= frag_end; j++) {
3257 theTSN = j + last_tsn;
3259 if (tp1->rec.data.doing_fast_retransmit)
3263 * CMT: CUCv2 algorithm. For each TSN being
3264 * processed from the sent queue, track the
3265 * next expected pseudo-cumack, or
3266 * rtx_pseudo_cumack, if required. Separate
3267 * cumack trackers for first transmissions,
3268 * and retransmissions.
3270 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3271 (tp1->snd_count == 1)) {
3272 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
3273 tp1->whoTo->find_pseudo_cumack = 0;
3275 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3276 (tp1->snd_count > 1)) {
3277 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
3278 tp1->whoTo->find_rtx_pseudo_cumack = 0;
3280 if (tp1->rec.data.TSN_seq == theTSN) {
3281 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3283 * must be held until
3287 * ECN Nonce: Add the nonce
3288 * value to the sender's
3291 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3293 * If it is less than RESEND, it is
3294 * now no-longer in flight.
3295 * Higher values may already be set
3296 * via previous Gap Ack Blocks...
3297 * i.e. ACKED or RESEND.
3299 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3300 *biggest_newly_acked_tsn, MAX_TSN)) {
3301 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
3310 * this_sack_highest_
3314 if (tp1->rec.data.chunk_was_revoked == 0)
3315 tp1->whoTo->saw_newack = 1;
3317 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3318 tp1->whoTo->this_sack_highest_newack,
3320 tp1->whoTo->this_sack_highest_newack =
3321 tp1->rec.data.TSN_seq;
3326 * this_sack_lowest_n
3329 if (*this_sack_lowest_newack == 0) {
3330 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3331 sctp_log_sack(*this_sack_lowest_newack,
3333 tp1->rec.data.TSN_seq,
3336 SCTP_LOG_TSN_ACKED);
3338 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
3343 * (rtx-)pseudo-cumac
3348 * (rtx-)pseudo-cumac
3350 * new_(rtx_)pseudo_c
3358 * (rtx-)pseudo-cumac
3366 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3367 if (tp1->rec.data.chunk_was_revoked == 0) {
3368 tp1->whoTo->new_pseudo_cumack = 1;
3370 tp1->whoTo->find_pseudo_cumack = 1;
3372 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3373 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3375 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3376 if (tp1->rec.data.chunk_was_revoked == 0) {
3377 tp1->whoTo->new_pseudo_cumack = 1;
3379 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3381 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3382 sctp_log_sack(*biggest_newly_acked_tsn,
3384 tp1->rec.data.TSN_seq,
3387 SCTP_LOG_TSN_ACKED);
3389 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3390 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3391 tp1->whoTo->flight_size,
3393 (uintptr_t) tp1->whoTo,
3394 tp1->rec.data.TSN_seq);
3396 sctp_flight_size_decrease(tp1);
3397 sctp_total_flight_decrease(stcb, tp1);
3399 tp1->whoTo->net_ack += tp1->send_size;
3400 if (tp1->snd_count < 2) {
3406 tp1->whoTo->net_ack2 += tp1->send_size;
3413 sctp_calculate_rto(stcb,
3416 &tp1->sent_rcv_time,
3417 sctp_align_safe_nocopy);
3422 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3423 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3424 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3425 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3426 asoc->this_sack_highest_gap,
3428 asoc->this_sack_highest_gap =
3429 tp1->rec.data.TSN_seq;
3431 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3432 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3433 #ifdef SCTP_AUDITING_ENABLED
3434 sctp_audit_log(0xB2,
3435 (asoc->sent_queue_retran_cnt & 0x000000ff));
3440 * All chunks NOT UNSENT
3441 * fall through here and are
3442 * marked (leave PR-SCTP
3443 * ones that are to skip
3446 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3447 tp1->sent = SCTP_DATAGRAM_MARKED;
3449 if (tp1->rec.data.chunk_was_revoked) {
3450 /* deflate the cwnd */
3451 tp1->whoTo->cwnd -= tp1->book_size;
3452 tp1->rec.data.chunk_was_revoked = 0;
3456 } /* if (tp1->TSN_seq == theTSN) */
3457 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3461 tp1 = TAILQ_NEXT(tp1, sctp_next);
3462 } /* end while (tp1) */
3463 } /* end for (j = fragStart */
3464 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3465 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3466 *offset += sizeof(block);
3471 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3473 sctp_log_fr(*biggest_tsn_acked,
3474 *biggest_newly_acked_tsn,
3475 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
/*
 * sctp_check_for_revoked():
 * Walk the sent_queue for TSNs above the new cumulative ack.  A chunk
 * still SCTP_DATAGRAM_ACKED that is NOT covered by this SACK's gap
 * reports has been "revoked" by the peer: put it back to
 * SCTP_DATAGRAM_SENT, re-add it to the flight size, and inflate cwnd
 * to compensate.  A chunk in MARKED state was re-acked by this SACK
 * and is promoted back to ACKED.  If anything was revoked, reset the
 * ECN-nonce resync point.
 *
 * NOTE(review): this listing is elided -- loop headers and closing
 * braces between the visible lines are missing, so comments describe
 * intent only where the visible code establishes it.
 */
3480 sctp_check_for_revoked(struct sctp_tcb *stcb,
3481 struct sctp_association *asoc, uint32_t cumack,
3482 u_long biggest_tsn_acked)
3484 struct sctp_tmit_chunk *tp1;
3485 int tot_revoked = 0;
3487 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only chunks strictly above the cum-ack can be revoked/re-acked. */
3489 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3492 * ok this guy is either ACK or MARKED. If it is
3493 * ACKED it has been previously acked but not this
3494 * time i.e. revoked. If it is MARKED it was ACK'ed
3497 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3502 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3503 /* it has been revoked */
3504 tp1->sent = SCTP_DATAGRAM_SENT;
3505 tp1->rec.data.chunk_was_revoked = 1;
3507 * We must add this stuff back in to assure
3508 * timers and such get started.
3510 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3511 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3512 tp1->whoTo->flight_size,
3514 (uintptr_t) tp1->whoTo,
3515 tp1->rec.data.TSN_seq);
3517 sctp_flight_size_increase(tp1);
3518 sctp_total_flight_increase(stcb, tp1);
3520 * We inflate the cwnd to compensate for our
3521 * artificial inflation of the flight_size.
3523 tp1->whoTo->cwnd += tp1->book_size;
3525 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3526 sctp_log_sack(asoc->last_acked_seq,
3528 tp1->rec.data.TSN_seq,
3531 SCTP_LOG_TSN_REVOKED);
3533 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3534 /* it has been re-acked in this SACK */
3535 tp1->sent = SCTP_DATAGRAM_ACKED;
/* An UNSENT chunk on the sent_queue should be impossible; the
 * body of this check is elided here -- TODO confirm against the
 * full source. */
3538 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3540 tp1 = TAILQ_NEXT(tp1, sctp_next);
3542 if (tot_revoked > 0) {
3544 * Setup the ecn nonce re-sync point. We do this since once
3545 * data is revoked we begin to retransmit things, which do
3546 * NOT have the ECN bits set. This means we are now out of
3547 * sync and must wait until we get back in sync with the
3548 * peer to check ECN bits.
3550 tp1 = TAILQ_FIRST(&asoc->send_queue);
3552 asoc->nonce_resync_tsn = asoc->sending_seq;
3554 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3556 asoc->nonce_wait_for_ecne = 0;
3557 asoc->nonce_sum_check = 0;
/*
 * sctp_strike_gap_ack_chunks():
 * Apply fast-retransmit "strike" marks to sent_queue chunks that were
 * missed by this SACK.  Visible logic covers: the HTNA rule (do not
 * strike TSNs above the highest newly-acked), the CMT SFR rule (only
 * strike data sent to a destination that saw a new ack), the CMT DAC
 * extra strike, PR-SCTP lifetime expiry, and the special handling for
 * chunks that already did a fast retransmit.  Chunks reaching RESEND
 * are taken out of flight, an alternate destination is chosen, and
 * fast_retran_tsn is recorded for subsequent-FR detection.  If any
 * retransmissions were set up, the ECN-nonce resync point is reset.
 *
 * NOTE(review): elided listing -- strike counters (tp1->sent++ etc.),
 * loop headers, and closing braces are missing between visible lines;
 * comments below describe only what the visible lines establish.
 */
3563 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3564 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3566 struct sctp_tmit_chunk *tp1;
3567 int strike_flag = 0;
3569 int tot_retrans = 0;
3570 uint32_t sending_seq;
3571 struct sctp_nets *net;
3572 int num_dests_sacked = 0;
3575 * select the sending_seq, this is either the next thing ready to be
3576 * sent but not transmitted, OR, the next seq we assign.
3578 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3580 sending_seq = asoc->sending_seq;
3582 sending_seq = tp1->rec.data.TSN_seq;
3585 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3586 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3587 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3588 if (net->saw_newack)
3592 if (stcb->asoc.peer_supports_prsctp) {
3593 (void)SCTP_GETTIME_TIMEVAL(&now);
3595 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3598 if (tp1->no_fr_allowed) {
3599 /* this one had a timeout or something */
3600 tp1 = TAILQ_NEXT(tp1, sctp_next);
3603 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3604 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3605 sctp_log_fr(biggest_tsn_newly_acked,
3606 tp1->rec.data.TSN_seq,
3608 SCTP_FR_LOG_CHECK_STRIKE);
/* Nothing above the biggest acked TSN (or unsent) can be struck. */
3610 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3612 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3616 if (stcb->asoc.peer_supports_prsctp) {
3617 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3618 /* Is it expired? */
3621 * TODO sctp_constants.h needs alternative
3622 * time macros when _KERNEL is undefined.
3624 (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3626 /* Yes so drop it */
3627 if (tp1->data != NULL) {
3628 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3629 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3630 SCTP_SO_NOT_LOCKED);
3632 tp1 = TAILQ_NEXT(tp1, sctp_next);
3637 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3638 asoc->this_sack_highest_gap, MAX_TSN)) {
3639 /* we are beyond the tsn in the sack */
3642 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3643 /* either a RESEND, ACKED, or MARKED */
3645 tp1 = TAILQ_NEXT(tp1, sctp_next);
3649 * CMT : SFR algo (covers part of DAC and HTNA as well)
3651 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3653 * No new acks were receieved for data sent to this
3654 * dest. Therefore, according to the SFR algo for
3655 * CMT, no data sent to this dest can be marked for
3656 * FR using this SACK.
3658 tp1 = TAILQ_NEXT(tp1, sctp_next);
3660 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3661 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3663 * CMT: New acks were receieved for data sent to
3664 * this dest. But no new acks were seen for data
3665 * sent after tp1. Therefore, according to the SFR
3666 * algo for CMT, tp1 cannot be marked for FR using
3667 * this SACK. This step covers part of the DAC algo
3668 * and the HTNA algo as well.
3670 tp1 = TAILQ_NEXT(tp1, sctp_next);
3674 * Here we check to see if we were have already done a FR
3675 * and if so we see if the biggest TSN we saw in the sack is
3676 * smaller than the recovery point. If so we don't strike
3677 * the tsn... otherwise we CAN strike the TSN.
3680 * @@@ JRI: Check for CMT if (accum_moved &&
3681 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3684 if (accum_moved && asoc->fast_retran_loss_recovery) {
3686 * Strike the TSN if in fast-recovery and cum-ack
3689 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3690 sctp_log_fr(biggest_tsn_newly_acked,
3691 tp1->rec.data.TSN_seq,
3693 SCTP_FR_LOG_STRIKE_CHUNK);
3695 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3698 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3700 * CMT DAC algorithm: If SACK flag is set to
3701 * 0, then lowest_newack test will not pass
3702 * because it would have been set to the
3703 * cumack earlier. If not already to be
3704 * rtx'd, If not a mixed sack and if tp1 is
3705 * not between two sacked TSNs, then mark by
3706 * one more. NOTE that we are marking by one
3707 * additional time since the SACK DAC flag
3708 * indicates that two packets have been
3709 * received after this missing TSN.
3711 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3712 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3713 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3714 sctp_log_fr(16 + num_dests_sacked,
3715 tp1->rec.data.TSN_seq,
3717 SCTP_FR_LOG_STRIKE_CHUNK);
/* Chunk already fast-retransmitted once (non-CMT path): only
 * strike again if this SACK acks beyond the sending_seq recorded
 * at the time of the previous FR. */
3722 } else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3724 * For those that have done a FR we must take
3725 * special consideration if we strike. I.e the
3726 * biggest_newly_acked must be higher than the
3727 * sending_seq at the time we did the FR.
3730 #ifdef SCTP_FR_TO_ALTERNATE
3732 * If FR's go to new networks, then we must only do
3733 * this for singly homed asoc's. However if the FR's
3734 * go to the same network (Armando's work) then its
3735 * ok to FR multiple times.
3743 if ((compare_with_wrap(biggest_tsn_newly_acked,
3744 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3745 (biggest_tsn_newly_acked ==
3746 tp1->rec.data.fast_retran_tsn)) {
3748 * Strike the TSN, since this ack is
3749 * beyond where things were when we
3752 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3753 sctp_log_fr(biggest_tsn_newly_acked,
3754 tp1->rec.data.TSN_seq,
3756 SCTP_FR_LOG_STRIKE_CHUNK);
3758 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3762 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3764 * CMT DAC algorithm: If
3765 * SACK flag is set to 0,
3766 * then lowest_newack test
3767 * will not pass because it
3768 * would have been set to
3769 * the cumack earlier. If
3770 * not already to be rtx'd,
3771 * If not a mixed sack and
3772 * if tp1 is not between two
3773 * sacked TSNs, then mark by
3774 * one more. NOTE that we
3775 * are marking by one
3776 * additional time since the
3777 * SACK DAC flag indicates
3778 * that two packets have
3779 * been received after this
3782 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3783 (num_dests_sacked == 1) &&
3784 compare_with_wrap(this_sack_lowest_newack,
3785 tp1->rec.data.TSN_seq, MAX_TSN)) {
3786 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3787 sctp_log_fr(32 + num_dests_sacked,
3788 tp1->rec.data.TSN_seq,
3790 SCTP_FR_LOG_STRIKE_CHUNK);
3792 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3800 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3803 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3804 biggest_tsn_newly_acked, MAX_TSN)) {
3806 * We don't strike these: This is the HTNA
3807 * algorithm i.e. we don't strike If our TSN is
3808 * larger than the Highest TSN Newly Acked.
3812 /* Strike the TSN */
3813 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3814 sctp_log_fr(biggest_tsn_newly_acked,
3815 tp1->rec.data.TSN_seq,
3817 SCTP_FR_LOG_STRIKE_CHUNK);
3819 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3822 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3824 * CMT DAC algorithm: If SACK flag is set to
3825 * 0, then lowest_newack test will not pass
3826 * because it would have been set to the
3827 * cumack earlier. If not already to be
3828 * rtx'd, If not a mixed sack and if tp1 is
3829 * not between two sacked TSNs, then mark by
3830 * one more. NOTE that we are marking by one
3831 * additional time since the SACK DAC flag
3832 * indicates that two packets have been
3833 * received after this missing TSN.
3835 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3836 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3837 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3838 sctp_log_fr(48 + num_dests_sacked,
3839 tp1->rec.data.TSN_seq,
3841 SCTP_FR_LOG_STRIKE_CHUNK);
/* Chunk has accumulated enough strikes (marking elided above) and
 * is now RESEND: pull it out of flight, credit the peer rwnd, and
 * set up the actual fast retransmission. */
3847 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3848 struct sctp_nets *alt;
3850 /* fix counts and things */
3851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3852 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3853 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3855 (uintptr_t) tp1->whoTo,
3856 tp1->rec.data.TSN_seq);
3859 tp1->whoTo->net_ack++;
3860 sctp_flight_size_decrease(tp1);
3862 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3863 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3864 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3866 /* add back to the rwnd */
3867 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3869 /* remove from the total flight */
3870 sctp_total_flight_decrease(stcb, tp1);
3872 if ((stcb->asoc.peer_supports_prsctp) &&
3873 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3875 * Has it been retransmitted tv_sec times? -
3876 * we store the retran count there.
3878 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3879 /* Yes, so drop it */
3880 if (tp1->data != NULL) {
3881 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3882 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3883 SCTP_SO_NOT_LOCKED);
3885 /* Make sure to flag we had a FR */
3886 tp1->whoTo->net_ack++;
3887 tp1 = TAILQ_NEXT(tp1, sctp_next);
3891 /* printf("OK, we are now ready to FR this guy\n"); */
3892 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3893 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3897 /* This is a subsequent FR */
3898 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3900 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3901 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3903 * CMT: Using RTX_SSTHRESH policy for CMT.
3904 * If CMT is being used, then pick dest with
3905 * largest ssthresh for any retransmission.
3907 tp1->no_fr_allowed = 1;
3909 /* sa_ignore NO_NULL_CHK */
3910 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3912 * JRS 5/18/07 - If CMT PF is on,
3913 * use the PF version of
3916 alt = sctp_find_alternate_net(stcb, alt, 2);
3919 * JRS 5/18/07 - If only CMT is on,
3920 * use the CMT version of
3923 /* sa_ignore NO_NULL_CHK */
3924 alt = sctp_find_alternate_net(stcb, alt, 1);
3930 * CUCv2: If a different dest is picked for
3931 * the retransmission, then new
3932 * (rtx-)pseudo_cumack needs to be tracked
3933 * for orig dest. Let CUCv2 track new (rtx-)
3934 * pseudo-cumack always.
3937 tp1->whoTo->find_pseudo_cumack = 1;
3938 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3940 } else {/* CMT is OFF */
3942 #ifdef SCTP_FR_TO_ALTERNATE
3943 /* Can we find an alternate? */
3944 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3947 * default behavior is to NOT retransmit
3948 * FR's to an alternate. Armando Caro's
3949 * paper details why.
3955 tp1->rec.data.doing_fast_retransmit = 1;
3957 /* mark the sending seq for possible subsequent FR's */
3959 * printf("Marking TSN for FR new value %x\n",
3960 * (uint32_t)tpi->rec.data.TSN_seq);
3962 if (TAILQ_EMPTY(&asoc->send_queue)) {
3964 * If the queue of send is empty then its
3965 * the next sequence number that will be
3966 * assigned so we subtract one from this to
3967 * get the one we last sent.
3969 tp1->rec.data.fast_retran_tsn = sending_seq;
3972 * If there are chunks on the send queue
3973 * (unsent data that has made it from the
3974 * stream queues but not out the door, we
3975 * take the first one (which will have the
3976 * lowest TSN) and subtract one to get the
3979 struct sctp_tmit_chunk *ttt;
3981 ttt = TAILQ_FIRST(&asoc->send_queue);
3982 tp1->rec.data.fast_retran_tsn =
3983 ttt->rec.data.TSN_seq;
3988 * this guy had a RTO calculation pending on
3993 if (alt != tp1->whoTo) {
3994 /* yes, there is an alternate. */
3995 sctp_free_remote_addr(tp1->whoTo);
3996 /* sa_ignore FREED_MEMORY */
3998 atomic_add_int(&alt->ref_count, 1);
4001 tp1 = TAILQ_NEXT(tp1, sctp_next);
4004 if (tot_retrans > 0) {
4006 * Setup the ecn nonce re-sync point. We do this since once
4007 * we go to FR something we introduce a Karn's rule scenario
4008 * and won't know the totals for the ECN bits.
4010 asoc->nonce_resync_tsn = sending_seq;
4011 asoc->nonce_wait_for_ecne = 0;
4012 asoc->nonce_sum_check = 0;
/*
 * sctp_try_advance_peer_ack_point():
 * PR-SCTP: scan the sent_queue from the front and move
 * asoc->advanced_peer_ack_point forward over chunks marked
 * SCTP_FORWARD_TSN_SKIP (dropping expired TTL-policy RESENDs along
 * the way).  Stops at the first chunk that is neither SKIP nor RESEND,
 * or at any reliable (non-PR) chunk.  Returns the chunk used for the
 * advance (a_adv) so the caller can decide whether to emit a FWD-TSN;
 * returns NULL-ish early when the peer does not support PR-SCTP.
 *
 * NOTE(review): elided listing -- loop construct, several closing
 * braces, and the return statements are not visible here.
 */
4016 struct sctp_tmit_chunk *
4017 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
4018 struct sctp_association *asoc)
4020 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
4024 if (asoc->peer_supports_prsctp == 0) {
4027 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4029 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
4030 tp1->sent != SCTP_DATAGRAM_RESEND) {
4031 /* no chance to advance, out of here */
4034 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4035 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4036 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4037 asoc->advanced_peer_ack_point,
4038 tp1->rec.data.TSN_seq, 0, 0);
4041 if (!PR_SCTP_ENABLED(tp1->flags)) {
4043 * We can't fwd-tsn past any that are reliable aka
4044 * retransmitted until the asoc fails.
4049 (void)SCTP_GETTIME_TIMEVAL(&now);
4052 tp2 = TAILQ_NEXT(tp1, sctp_next);
4054 * now we got a chunk which is marked for another
4055 * retransmission to a PR-stream but has run out its chances
4056 * already maybe OR has been marked to skip now. Can we skip
4057 * it if its a resend?
4059 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
4060 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
4062 * Now is this one marked for resend and its time is
4065 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
4066 /* Yes so drop it */
4068 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
4069 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
4070 SCTP_SO_NOT_LOCKED);
4074 * No, we are done when hit one for resend
4075 * whos time as not expired.
4081 * Ok now if this chunk is marked to drop it we can clean up
4082 * the chunk, advance our peer ack point and we can check
4085 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4086 /* advance PeerAckPoint goes forward */
4087 if (compare_with_wrap(tp1->rec.data.TSN_seq,
4088 asoc->advanced_peer_ack_point,
4091 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
4093 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
4094 /* No update but we do save the chk */
4099 * If it is still in RESEND we can advance no
4105 * If we hit here we just dumped tp1, move to next tsn on
/*
 * sctp_fs_audit():
 * Debug audit of the flight-size bookkeeping: count sent_queue chunks
 * by state (inflight / resend / in-between / acked / above) and flag
 * an inconsistency when anything is still counted in flight while the
 * caller believed the flight was empty.  Under INVARIANTS this panics;
 * otherwise it prints diagnostics.  Return value (ret) is set on
 * elided lines -- callers treat non-zero as "audit failed, rebuild
 * flight counts" (see usage at original line 4583).
 */
4114 sctp_fs_audit(struct sctp_association *asoc)
4116 struct sctp_tmit_chunk *chk;
4117 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4118 int entry_flight, entry_cnt, ret;
4120 entry_flight = asoc->total_flight;
4121 entry_cnt = asoc->total_flight_count;
4124 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
4127 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4128 if (chk->sent < SCTP_DATAGRAM_RESEND) {
4129 printf("Chk TSN:%u size:%d inflight cnt:%d\n",
4130 chk->rec.data.TSN_seq,
4135 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4137 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4139 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
4146 if ((inflight > 0) || (inbetween > 0)) {
4148 panic("Flight size-express incorrect? \n");
4150 printf("asoc->total_flight:%d cnt:%d\n",
4151 entry_flight, entry_cnt);
4153 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
4154 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery():
 * Called when the peer's receive window reopened and a chunk was sent
 * as a window probe.  Clears the chunk's window_probe flag; if the
 * chunk is already ACKED (or has no data) only a log entry is made,
 * otherwise the chunk is removed from the flight accounting and marked
 * SCTP_DATAGRAM_RESEND so it will be retransmitted normally.
 */
4168 tp1->window_probe = 0;
4169 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
4170 /* TSN's skipped we do NOT move back. */
4171 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
4172 tp1->whoTo->flight_size,
4174 (uintptr_t) tp1->whoTo,
4175 tp1->rec.data.TSN_seq);
4178 /* First setup this by shrinking flight */
4179 sctp_flight_size_decrease(tp1);
4180 sctp_total_flight_decrease(stcb, tp1);
4181 /* Now mark for resend */
4182 tp1->sent = SCTP_DATAGRAM_RESEND;
4183 asoc->sent_queue_retran_cnt++;
4184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4185 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4186 tp1->whoTo->flight_size,
4188 (uintptr_t) tp1->whoTo,
4189 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack():
 * Fast-path SACK processing used when the SACK carries only a new
 * cumulative ack (no gap-ack blocks).  Visible work: validate the
 * cum-ack against what we have actually sent (aborting the association
 * on an impossible value), free every sent_queue chunk at or below the
 * cum-ack while updating flight size / RTO / CC state, wake the
 * sending socket, run the ECN-nonce sum check, recompute the peer
 * rwnd (with SWS avoidance), restart or stop retransmission timers
 * per destination, handle window-probe recovery, drive the
 * shutdown-pending state machine, and perform the PR-SCTP
 * advanced-peer-ack-point / FWD-TSN procedures.
 *
 * NOTE(review): elided listing -- loop headers, many closing braces,
 * and several statements are missing between the visible lines;
 * comments describe only what the visible lines establish.
 */
4194 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4195 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4197 struct sctp_nets *net;
4198 struct sctp_association *asoc;
4199 struct sctp_tmit_chunk *tp1, *tp2;
4201 int win_probe_recovery = 0;
4202 int win_probe_recovered = 0;
4203 int j, done_once = 0;
4205 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4206 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4207 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4209 SCTP_TCB_LOCK_ASSERT(stcb);
4210 #ifdef SCTP_ASOCLOG_OF_TSNS
4211 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4212 stcb->asoc.cumack_log_at++;
4213 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4214 stcb->asoc.cumack_log_at = 0;
4218 old_rwnd = asoc->peers_rwnd;
/* Old SACK (cum-ack behind what we already acked): drop it.  Equal
 * cum-ack is treated as a pure window update. */
4219 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4222 } else if (asoc->last_acked_seq == cumack) {
4223 /* Window update sack */
4224 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4225 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4226 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4227 /* SWS sender side engages */
4228 asoc->peers_rwnd = 0;
4230 if (asoc->peers_rwnd > old_rwnd) {
4235 /* First setup for CC stuff */
4236 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4237 net->prev_cwnd = net->cwnd;
4242 * CMT: Reset CUC and Fast recovery algo variables before
4245 net->new_pseudo_cumack = 0;
4246 net->will_exit_fast_recovery = 0;
/* Strict checking: a cum-ack at/above anything we have sent is a
 * protocol violation -- abort the association. */
4248 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4251 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4252 tp1 = TAILQ_LAST(&asoc->sent_queue,
4253 sctpchunk_listhead);
4254 send_s = tp1->rec.data.TSN_seq + 1;
4256 send_s = asoc->sending_seq;
4258 if ((cumack == send_s) ||
4259 compare_with_wrap(cumack, send_s, MAX_TSN)) {
4265 panic("Impossible sack 1");
4269 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4270 0, M_DONTWAIT, 1, MT_DATA);
4272 struct sctp_paramhdr *ph;
4275 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4277 ph = mtod(oper, struct sctp_paramhdr *);
4278 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4279 ph->param_length = htons(SCTP_BUF_LEN(oper));
4280 ippp = (uint32_t *) (ph + 1);
4281 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4283 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4284 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4289 asoc->this_sack_highest_gap = cumack;
4290 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4291 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4292 stcb->asoc.overall_error_count,
4294 SCTP_FROM_SCTP_INDATA,
4297 stcb->asoc.overall_error_count = 0;
/* New cum-ack: release every sent_queue chunk it covers. */
4298 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4299 /* process the new consecutive TSN first */
4300 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4302 tp2 = TAILQ_NEXT(tp1, sctp_next);
4303 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4305 cumack == tp1->rec.data.TSN_seq) {
4306 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4307 printf("Warning, an unsent is now acked?\n");
4310 * ECN Nonce: Add the nonce to the sender's
4313 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4314 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4316 * If it is less than ACKED, it is
4317 * now no-longer in flight. Higher
4318 * values may occur during marking
4320 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4321 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4322 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4323 tp1->whoTo->flight_size,
4325 (uintptr_t) tp1->whoTo,
4326 tp1->rec.data.TSN_seq);
4328 sctp_flight_size_decrease(tp1);
4329 /* sa_ignore NO_NULL_CHK */
4330 sctp_total_flight_decrease(stcb, tp1);
4332 tp1->whoTo->net_ack += tp1->send_size;
4333 if (tp1->snd_count < 2) {
4335 * True non-retransmited
4338 tp1->whoTo->net_ack2 +=
4341 /* update RTO too? */
4348 sctp_calculate_rto(stcb,
4350 &tp1->sent_rcv_time,
4351 sctp_align_safe_nocopy);
4356 * CMT: CUCv2 algorithm. From the
4357 * cumack'd TSNs, for each TSN being
4358 * acked for the first time, set the
4359 * following variables for the
4360 * corresp destination.
4361 * new_pseudo_cumack will trigger a
4363 * find_(rtx_)pseudo_cumack will
4364 * trigger search for the next
4365 * expected (rtx-)pseudo-cumack.
4367 tp1->whoTo->new_pseudo_cumack = 1;
4368 tp1->whoTo->find_pseudo_cumack = 1;
4369 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4371 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4372 /* sa_ignore NO_NULL_CHK */
4373 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4376 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4377 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4379 if (tp1->rec.data.chunk_was_revoked) {
4380 /* deflate the cwnd */
4381 tp1->whoTo->cwnd -= tp1->book_size;
4382 tp1->rec.data.chunk_was_revoked = 0;
4384 tp1->sent = SCTP_DATAGRAM_ACKED;
4385 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4387 /* sa_ignore NO_NULL_CHK */
4388 sctp_free_bufspace(stcb, asoc, tp1, 1);
4389 sctp_m_freem(tp1->data);
4391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4392 sctp_log_sack(asoc->last_acked_seq,
4394 tp1->rec.data.TSN_seq,
4397 SCTP_LOG_FREE_SENT);
4400 asoc->sent_queue_cnt--;
4401 sctp_free_a_chunk(stcb, tp1);
/* Space was freed in the send buffer: wake the writer.  On Apple /
 * lock-testing builds the socket lock must be taken with the TCB
 * lock dropped, so re-check the assoc state after re-locking. */
4409 /* sa_ignore NO_NULL_CHK */
4410 if (stcb->sctp_socket) {
4411 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4416 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4418 /* sa_ignore NO_NULL_CHK */
4419 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4421 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4422 so = SCTP_INP_SO(stcb->sctp_ep);
4423 atomic_add_int(&stcb->asoc.refcnt, 1);
4424 SCTP_TCB_UNLOCK(stcb);
4425 SCTP_SOCKET_LOCK(so, 1);
4426 SCTP_TCB_LOCK(stcb);
4427 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4428 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4429 /* assoc was freed while we were unlocked */
4430 SCTP_SOCKET_UNLOCK(so, 1);
4434 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4435 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4436 SCTP_SOCKET_UNLOCK(so, 1);
4439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4440 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4444 /* JRS - Use the congestion control given in the CC module */
4445 if (asoc->last_acked_seq != cumack)
4446 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4448 asoc->last_acked_seq = cumack;
4450 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4451 /* nothing left in-flight */
4452 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4453 net->flight_size = 0;
4454 net->partial_bytes_acked = 0;
4456 asoc->total_flight = 0;
4457 asoc->total_flight_count = 0;
4459 /* ECN Nonce updates */
4460 if (asoc->ecn_nonce_allowed) {
4461 if (asoc->nonce_sum_check) {
4462 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4463 if (asoc->nonce_wait_for_ecne == 0) {
4464 struct sctp_tmit_chunk *lchk;
4466 lchk = TAILQ_FIRST(&asoc->send_queue);
4467 asoc->nonce_wait_for_ecne = 1;
4469 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4471 asoc->nonce_wait_tsn = asoc->sending_seq;
4474 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4475 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4477 * Misbehaving peer. We need
4478 * to react to this guy
4480 asoc->ecn_allowed = 0;
4481 asoc->ecn_nonce_allowed = 0;
4486 /* See if Resynchronization Possible */
4487 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4488 asoc->nonce_sum_check = 1;
4490 * now we must calculate what the base is.
4491 * We do this based on two things, we know
4492 * the total's for all the segments
4493 * gap-acked in the SACK (none), We also
4494 * know the SACK's nonce sum, its in
4495 * nonce_sum_flag. So we can build a truth
4496 * table to back-calculate the new value of
4497 * asoc->nonce_sum_expect_base:
4499 * SACK-flag-Value Seg-Sums Base 0 0 0
4503 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute the peer rwnd from the advertised window minus what is
 * still outstanding, with sender-side SWS avoidance. */
4508 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4509 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4510 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4511 /* SWS sender side engages */
4512 asoc->peers_rwnd = 0;
4514 if (asoc->peers_rwnd > old_rwnd) {
4515 win_probe_recovery = 1;
4517 /* Now assure a timer where data is queued at */
4520 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4523 if (win_probe_recovery && (net->window_probe)) {
4524 win_probe_recovered = 1;
4526 * Find first chunk that was used with window probe
4527 * and clear the sent
4529 /* sa_ignore FREED_MEMORY */
4530 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4531 if (tp1->window_probe) {
4532 sctp_window_probe_recovery(stcb, asoc, net, tp1);
4537 if (net->RTO == 0) {
4538 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4540 to_ticks = MSEC_TO_TICKS(net->RTO);
4542 if (net->flight_size) {
4544 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4545 sctp_timeout_handler, &net->rxt_timer);
4546 if (net->window_probe) {
4547 net->window_probe = 0;
4550 if (net->window_probe) {
4552 * In window probes we must assure a timer
4553 * is still running there
4555 net->window_probe = 0;
4556 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4557 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4558 sctp_timeout_handler, &net->rxt_timer);
4560 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4561 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4563 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4565 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4566 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4567 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4568 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4569 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Accounting sanity: if nothing progressed yet flight counters
 * disagree, audit and rebuild them from the sent_queue. */
4575 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4576 (asoc->sent_queue_retran_cnt == 0) &&
4577 (win_probe_recovered == 0) &&
4580 * huh, this should not happen unless all packets are
4581 * PR-SCTP and marked to skip of course.
4583 if (sctp_fs_audit(asoc)) {
4584 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4585 if (net->flight_size) {
4586 net->flight_size = 0;
4589 asoc->total_flight = 0;
4590 asoc->total_flight_count = 0;
4591 asoc->sent_queue_retran_cnt = 0;
4592 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4593 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4594 sctp_flight_size_increase(tp1);
4595 sctp_total_flight_increase(stcb, tp1);
4596 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4597 asoc->sent_queue_retran_cnt++;
4604 /**********************************/
4605 /* Now what about shutdown issues */
4606 /**********************************/
4607 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4608 /* nothing left on sendqueue.. consider done */
4610 if ((asoc->stream_queue_cnt == 1) &&
4611 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4612 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4613 (asoc->locked_on_sending)
4615 struct sctp_stream_queue_pending *sp;
4618 * I may be in a state where we got all across.. but
4619 * cannot write more due to a shutdown... we abort
4620 * since the user did not indicate EOR in this case.
4621 * The sp will be cleaned during free of the asoc.
4623 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4625 if ((sp) && (sp->length == 0)) {
4626 /* Let cleanup code purge it */
4627 if (sp->msg_is_complete) {
4628 asoc->stream_queue_cnt--;
4630 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4631 asoc->locked_on_sending = NULL;
4632 asoc->stream_queue_cnt--;
4636 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4637 (asoc->stream_queue_cnt == 0)) {
4638 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4639 /* Need to abort here */
4645 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4646 0, M_DONTWAIT, 1, MT_DATA);
4648 struct sctp_paramhdr *ph;
4651 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4653 ph = mtod(oper, struct sctp_paramhdr *);
4654 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4655 ph->param_length = htons(SCTP_BUF_LEN(oper));
4656 ippp = (uint32_t *) (ph + 1);
4657 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4659 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4660 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4662 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4663 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4664 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4666 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4667 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4668 sctp_stop_timers_for_shutdown(stcb);
4669 sctp_send_shutdown(stcb,
4670 stcb->asoc.primary_destination);
4671 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4672 stcb->sctp_ep, stcb, asoc->primary_destination);
4673 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4674 stcb->sctp_ep, stcb, asoc->primary_destination);
4676 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4677 (asoc->stream_queue_cnt == 0)) {
4678 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4681 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4682 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4683 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4684 sctp_send_shutdown_ack(stcb,
4685 stcb->asoc.primary_destination);
4687 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4688 stcb->sctp_ep, stcb, asoc->primary_destination);
4691 /*********************************************/
4692 /* Here we perform PR-SCTP procedures */
4694 /*********************************************/
4695 /* C1. update advancedPeerAckPoint */
4696 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4697 asoc->advanced_peer_ack_point = cumack;
4699 /* PR-Sctp issues need to be addressed too */
4700 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4701 struct sctp_tmit_chunk *lchk;
4702 uint32_t old_adv_peer_ack_point;
4704 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4705 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4706 /* C3. See if we need to send a Fwd-TSN */
4707 if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4710 * ISSUE with ECN, see FWD-TSN processing for notes
4711 * on issues that will occur when the ECN NONCE
4712 * stuff is put into SCTP for cross checking.
4714 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4716 send_forward_tsn(stcb, asoc);
4718 * ECN Nonce: Disable Nonce Sum check when
4719 * FWD TSN is sent and store resync tsn
4721 asoc->nonce_sum_check = 0;
4722 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4724 /* try to FR fwd-tsn's that get lost too */
4725 lchk->rec.data.fwd_tsn_cnt++;
4726 if (lchk->rec.data.fwd_tsn_cnt > 3) {
4727 send_forward_tsn(stcb, asoc);
4728 lchk->rec.data.fwd_tsn_cnt = 0;
4733 /* Assure a timer is up */
4734 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4735 stcb->sctp_ep, stcb, lchk->whoTo);
4738 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4739 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4741 stcb->asoc.peers_rwnd,
4742 stcb->asoc.total_flight,
4743 stcb->asoc.total_output_queue_size);
/*
 * Slow-path SACK processing.  Walks the sent queue against the SACK's
 * cumulative-ack and gap-ack blocks, frees chunks the peer has acked,
 * updates the peer's receive window (rwnd), congestion-control state and
 * retransmission timers, and performs the follow-up work: socket wakeup,
 * revocation checks, shutdown progression and PR-SCTP FWD-TSN advancement.
 *
 * NOTE(review): this block is an elided extraction -- the original-source
 * line numbers embedded at the start of each line skip values (e.g.
 * 4750 -> 4752), so conditions, braces and whole statements are missing.
 * Do not compile or restructure this text until it is re-synchronized with
 * the upstream sctp_indata.c; comments below describe only what the
 * visible lines show.
 */
4748 sctp_handle_sack(struct mbuf *m, int offset,
4749 struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4750 struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4752 struct sctp_association *asoc;
4753 struct sctp_sack *sack;
4754 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4756 this_sack_lowest_newack;
4757 uint32_t sav_cum_ack;
4758 uint16_t num_seg, num_dup;
4759 uint16_t wake_him = 0;
4760 unsigned int sack_length;
4761 uint32_t send_s = 0;
4763 int accum_moved = 0;
4764 int will_exit_fast_recovery = 0;
4765 uint32_t a_rwnd, old_rwnd;
4766 int win_probe_recovery = 0;
4767 int win_probe_recovered = 0;
4768 struct sctp_nets *net = NULL;
4769 int nonce_sum_flag, ecn_seg_sums = 0;
4771 uint8_t reneged_all = 0;
4772 uint8_t cmt_dac_flag;
4775 * we take any chance we can to service our queues since we cannot
4776 * get awoken when the socket is read from :<
4779 * Now perform the actual SACK handling: 1) Verify that it is not an
4780 * old sack, if so discard. 2) If there is nothing left in the send
4781 * queue (cum-ack is equal to last acked) then you have a duplicate
4782 * too, update any rwnd change and verify no timers are running.
4783 * then return. 3) Process any new consequtive data i.e. cum-ack
4784 * moved process these first and note that it moved. 4) Process any
4785 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4786 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4787 * sync up flightsizes and things, stop all timers and also check
4788 * for shutdown_pending state. If so then go ahead and send off the
4789 * shutdown. If in shutdown recv, send off the shutdown-ack and
4790 * start that timer, Ret. 9) Strike any non-acked things and do FR
4791 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4792 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4793 * if in shutdown_recv state.
4795 SCTP_TCB_LOCK_ASSERT(stcb);
4798 this_sack_lowest_newack = 0;
4800 sack_length = (unsigned int)sack_len;
/* Extract the SACK fields (network byte order) and record stats/logs. */
4802 SCTP_STAT_INCR(sctps_slowpath_sack);
4803 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4804 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4805 #ifdef SCTP_ASOCLOG_OF_TSNS
4806 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4807 stcb->asoc.cumack_log_at++;
4808 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4809 stcb->asoc.cumack_log_at = 0;
4812 num_seg = ntohs(sack->num_gap_ack_blks);
4816 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4817 num_dup = ntohs(sack->num_dup_tsns);
4819 old_rwnd = stcb->asoc.peers_rwnd;
4820 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4821 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4822 stcb->asoc.overall_error_count,
4824 SCTP_FROM_SCTP_INDATA,
/* Any SACK from the peer clears the association error counter. */
4827 stcb->asoc.overall_error_count = 0;
4829 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4830 sctp_log_sack(asoc->last_acked_seq,
/*
 * Optional FR/early-FR logging of each duplicate TSN reported in the
 * SACK; offsets into the mbuf chain are bounds-checked against
 * sack_length before each sctp_m_getptr().
 */
4837 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4838 int off_to_dup, iii;
4839 uint32_t *dupdata, dblock;
4841 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4842 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4843 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4844 sizeof(uint32_t), (uint8_t *) & dblock);
4845 off_to_dup += sizeof(uint32_t);
4847 for (iii = 0; iii < num_dup; iii++) {
4848 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4849 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4850 sizeof(uint32_t), (uint8_t *) & dblock);
4851 if (dupdata == NULL)
4853 off_to_dup += sizeof(uint32_t);
4857 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4858 off_to_dup, num_dup, sack_length, num_seg);
/*
 * Strict-SACK sanity: compute send_s, one past the highest TSN we have
 * ever queued.  A cum-ack at or beyond send_s acknowledges data we never
 * sent -- the peer is broken, so abort with a protocol-violation cause
 * (SCTP_LOC_25 identifies this abort site).
 */
4861 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4863 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4864 tp1 = TAILQ_LAST(&asoc->sent_queue,
4865 sctpchunk_listhead);
4866 send_s = tp1->rec.data.TSN_seq + 1;
4868 send_s = asoc->sending_seq;
4870 if (cum_ack == send_s ||
4871 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4878 panic("Impossible sack 1");
4883 * no way, we have not even sent this TSN out yet.
4884 * Peer is hopelessly messed up with us.
4889 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4890 0, M_DONTWAIT, 1, MT_DATA);
4892 struct sctp_paramhdr *ph;
4895 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4897 ph = mtod(oper, struct sctp_paramhdr *);
4898 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4899 ph->param_length = htons(SCTP_BUF_LEN(oper));
4900 ippp = (uint32_t *) (ph + 1);
4901 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4903 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4904 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4909 /**********************/
4910 /* 1) check the range */
4911 /**********************/
4912 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4913 /* acking something behind */
4916 sav_cum_ack = asoc->last_acked_seq;
4918 /* update the Rwnd of the peer */
/*
 * Fast exit: if nothing is outstanding anywhere (sent, send and stream
 * queues all empty) just adopt the advertised rwnd, clear the retran
 * count, apply silly-window avoidance, and stop every per-net T3/early-FR
 * timer before returning.
 */
4919 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4920 TAILQ_EMPTY(&asoc->send_queue) &&
4921 (asoc->stream_queue_cnt == 0)
4923 /* nothing left on send/sent and strmq */
4924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4925 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4926 asoc->peers_rwnd, 0, 0, a_rwnd);
4928 asoc->peers_rwnd = a_rwnd;
4929 if (asoc->sent_queue_retran_cnt) {
4930 asoc->sent_queue_retran_cnt = 0;
4932 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4933 /* SWS sender side engages */
4934 asoc->peers_rwnd = 0;
4936 /* stop any timers */
4937 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4938 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4939 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4940 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4941 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4942 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4943 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4944 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4947 net->partial_bytes_acked = 0;
4948 net->flight_size = 0;
4950 asoc->total_flight = 0;
4951 asoc->total_flight_count = 0;
4955 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4956 * things. The total byte count acked is tracked in netAckSz AND
4957 * netAck2 is used to track the total bytes acked that are un-
4958 * amibguious and were never retransmitted. We track these on a per
4959 * destination address basis.
4961 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4962 net->prev_cwnd = net->cwnd;
4967 * CMT: Reset CUC and Fast recovery algo variables before
4970 net->new_pseudo_cumack = 0;
4971 net->will_exit_fast_recovery = 0;
/*
 * Main cum-ack walk: advance over every sent chunk whose TSN is at or
 * below last_tsn, moving newly-acked chunks out of flight, confirming
 * unconfirmed destinations, sampling RTO, and marking each chunk
 * SCTP_DATAGRAM_ACKED.
 */
4973 /* process the new consecutive TSN first */
4974 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4976 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4978 last_tsn == tp1->rec.data.TSN_seq) {
4979 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4981 * ECN Nonce: Add the nonce to the sender's
4984 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4986 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4988 * If it is less than ACKED, it is
4989 * now no-longer in flight. Higher
4990 * values may occur during marking
4992 if ((tp1->whoTo->dest_state &
4993 SCTP_ADDR_UNCONFIRMED) &&
4994 (tp1->snd_count < 2)) {
4996 * If there was no retran
4997 * and the address is
4998 * un-confirmed and we sent
5000 * sacked.. its confirmed,
5003 tp1->whoTo->dest_state &=
5004 ~SCTP_ADDR_UNCONFIRMED;
5006 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5008 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
5009 tp1->whoTo->flight_size,
5011 (uintptr_t) tp1->whoTo,
5012 tp1->rec.data.TSN_seq);
5014 sctp_flight_size_decrease(tp1);
5015 sctp_total_flight_decrease(stcb, tp1);
5017 tp1->whoTo->net_ack += tp1->send_size;
5019 /* CMT SFR and DAC algos */
5020 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
5021 tp1->whoTo->saw_newack = 1;
5023 if (tp1->snd_count < 2) {
5025 * True non-retransmited
5028 tp1->whoTo->net_ack2 +=
5031 /* update RTO too? */
5034 sctp_calculate_rto(stcb,
5036 &tp1->sent_rcv_time,
5037 sctp_align_safe_nocopy);
5042 * CMT: CUCv2 algorithm. From the
5043 * cumack'd TSNs, for each TSN being
5044 * acked for the first time, set the
5045 * following variables for the
5046 * corresp destination.
5047 * new_pseudo_cumack will trigger a
5049 * find_(rtx_)pseudo_cumack will
5050 * trigger search for the next
5051 * expected (rtx-)pseudo-cumack.
5053 tp1->whoTo->new_pseudo_cumack = 1;
5054 tp1->whoTo->find_pseudo_cumack = 1;
5055 tp1->whoTo->find_rtx_pseudo_cumack = 1;
5058 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5059 sctp_log_sack(asoc->last_acked_seq,
5061 tp1->rec.data.TSN_seq,
5064 SCTP_LOG_TSN_ACKED);
5066 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
5067 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
5070 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5071 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
5072 #ifdef SCTP_AUDITING_ENABLED
5073 sctp_audit_log(0xB3,
5074 (asoc->sent_queue_retran_cnt & 0x000000ff));
5077 if (tp1->rec.data.chunk_was_revoked) {
5078 /* deflate the cwnd */
5079 tp1->whoTo->cwnd -= tp1->book_size;
5080 tp1->rec.data.chunk_was_revoked = 0;
5082 tp1->sent = SCTP_DATAGRAM_ACKED;
5087 tp1 = TAILQ_NEXT(tp1, sctp_next);
5089 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
5090 /* always set this up to cum-ack */
5091 asoc->this_sack_highest_gap = last_tsn;
/*
 * Gap-ack phase: advance the mbuf offset past the SACK header, validate
 * the claimed segment count against sack_length, then let
 * sctp_handle_segments() process each gap block (updating
 * biggest_tsn_acked / biggest_tsn_newly_acked / this_sack_lowest_newack
 * and accumulating the ECN nonce segment sums).
 */
5093 /* Move offset up to point to gaps/dups */
5094 offset += sizeof(struct sctp_sack_chunk);
5095 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
5097 /* skip corrupt segments */
5103 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
5104 * to be greater than the cumack. Also reset saw_newack to 0
5107 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5108 net->saw_newack = 0;
5109 net->this_sack_highest_newack = last_tsn;
5113 * thisSackHighestGap will increase while handling NEW
5114 * segments this_sack_highest_newack will increase while
5115 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
5116 * used for CMT DAC algo. saw_newack will also change.
5118 sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
5119 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
5120 num_seg, &ecn_seg_sums);
5122 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
5124 * validate the biggest_tsn_acked in the gap acks if
5125 * strict adherence is wanted.
5127 if ((biggest_tsn_acked == send_s) ||
5128 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
5130 * peer is either confused or we are under
5131 * attack. We must abort.
5138 /*******************************************/
5139 /* cancel ALL T3-send timer if accum moved */
5140 /*******************************************/
5141 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
5142 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5143 if (net->new_pseudo_cumack)
5144 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5146 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
5151 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5152 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5153 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
5157 /********************************************/
5158 /* drop the acked chunks from the sendqueue */
5159 /********************************************/
5160 asoc->last_acked_seq = cum_ack;
5162 tp1 = TAILQ_FIRST(&asoc->sent_queue);
5166 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
5170 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
5171 /* no more sent on list */
5172 printf("Warning, tp1->sent == %d and its now acked?\n",
5175 tp2 = TAILQ_NEXT(tp1, sctp_next);
5176 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
5177 if (tp1->pr_sctp_on) {
5178 if (asoc->pr_sctp_cnt != 0)
5179 asoc->pr_sctp_cnt--;
/* Sanity audit: an empty sent queue must imply zero total flight. */
5181 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
5182 (asoc->total_flight > 0)) {
5184 panic("Warning flight size is postive and should be 0");
5186 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
5187 asoc->total_flight);
5189 asoc->total_flight = 0;
5192 /* sa_ignore NO_NULL_CHK */
5193 sctp_free_bufspace(stcb, asoc, tp1, 1);
5194 sctp_m_freem(tp1->data);
5195 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5196 asoc->sent_queue_cnt_removeable--;
5199 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5200 sctp_log_sack(asoc->last_acked_seq,
5202 tp1->rec.data.TSN_seq,
5205 SCTP_LOG_FREE_SENT);
5208 asoc->sent_queue_cnt--;
5209 sctp_free_a_chunk(stcb, tp1);
5212 } while (tp1 != NULL);
/*
 * Socket wakeup: freeing acked chunks released send-buffer space, so wake
 * any writer blocked on so_snd.  On Apple / SO_LOCK_TESTING builds the
 * socket lock must be taken with the TCB briefly unlocked; the refcount
 * bump keeps the assoc alive and the CLOSED_SOCKET re-check guards
 * against it being freed in the window.
 */
5215 /* sa_ignore NO_NULL_CHK */
5216 if ((wake_him) && (stcb->sctp_socket)) {
5217 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5221 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
5222 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5223 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
5225 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5226 so = SCTP_INP_SO(stcb->sctp_ep);
5227 atomic_add_int(&stcb->asoc.refcnt, 1);
5228 SCTP_TCB_UNLOCK(stcb);
5229 SCTP_SOCKET_LOCK(so, 1);
5230 SCTP_TCB_LOCK(stcb);
5231 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5232 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5233 /* assoc was freed while we were unlocked */
5234 SCTP_SOCKET_UNLOCK(so, 1);
5238 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
5239 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5240 SCTP_SOCKET_UNLOCK(so, 1);
5243 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5244 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
5248 if (asoc->fast_retran_loss_recovery && accum_moved) {
5249 if (compare_with_wrap(asoc->last_acked_seq,
5250 asoc->fast_recovery_tsn, MAX_TSN) ||
5251 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
5252 /* Setup so we will exit RFC2582 fast recovery */
5253 will_exit_fast_recovery = 1;
5257 * Check for revoked fragments:
5259 * if Previous sack - Had no frags then we can't have any revoked if
5260 * Previous sack - Had frag's then - If we now have frags aka
5261 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
5262 * some of them. else - The peer revoked all ACKED fragments, since
5263 * we had some before and now we have NONE.
5267 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
5268 else if (asoc->saw_sack_with_frags) {
5269 int cnt_revoked = 0;
5271 tp1 = TAILQ_FIRST(&asoc->sent_queue);
5273 /* Peer revoked all dg's marked or acked */
5274 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5275 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
5276 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
5277 tp1->sent = SCTP_DATAGRAM_SENT;
5278 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5279 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
5280 tp1->whoTo->flight_size,
5282 (uintptr_t) tp1->whoTo,
5283 tp1->rec.data.TSN_seq);
5285 sctp_flight_size_increase(tp1);
5286 sctp_total_flight_increase(stcb, tp1);
5287 tp1->rec.data.chunk_was_revoked = 1;
5289 * To ensure that this increase in
5290 * flightsize, which is artificial,
5291 * does not throttle the sender, we
5292 * also increase the cwnd
5295 tp1->whoTo->cwnd += tp1->book_size;
5303 asoc->saw_sack_with_frags = 0;
5306 asoc->saw_sack_with_frags = 1;
5308 asoc->saw_sack_with_frags = 0;
5310 /* JRS - Use the congestion control given in the CC module */
5311 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5313 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5314 /* nothing left in-flight */
5315 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5316 /* stop all timers */
5317 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5318 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5319 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5320 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5321 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5324 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5325 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5326 net->flight_size = 0;
5327 net->partial_bytes_acked = 0;
5329 asoc->total_flight = 0;
5330 asoc->total_flight_count = 0;
5332 /**********************************/
5333 /* Now what about shutdown issues */
5334 /**********************************/
5335 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5336 /* nothing left on sendqueue.. consider done */
5337 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5338 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5339 asoc->peers_rwnd, 0, 0, a_rwnd);
5341 asoc->peers_rwnd = a_rwnd;
5342 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5343 /* SWS sender side engages */
5344 asoc->peers_rwnd = 0;
/*
 * Shutdown progression: if a shutdown is pending/received and a stream
 * is locked on a zero-length trailing message, either count the stream
 * done (message complete) or flag PARTIAL_MSG_LEFT, which forces the
 * user-initiated abort below (SCTP_LOC_31).  Otherwise, once the stream
 * queues drain, move to SHUTDOWN-SENT (and send SHUTDOWN) or, in
 * SHUTDOWN-RECEIVED, send SHUTDOWN-ACK and arm its timer.
 */
5347 if ((asoc->stream_queue_cnt == 1) &&
5348 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5349 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5350 (asoc->locked_on_sending)
5352 struct sctp_stream_queue_pending *sp;
5355 * I may be in a state where we got all across.. but
5356 * cannot write more due to a shutdown... we abort
5357 * since the user did not indicate EOR in this case.
5359 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5361 if ((sp) && (sp->length == 0)) {
5362 asoc->locked_on_sending = NULL;
5363 if (sp->msg_is_complete) {
5364 asoc->stream_queue_cnt--;
5366 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5367 asoc->stream_queue_cnt--;
5371 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5372 (asoc->stream_queue_cnt == 0)) {
5373 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5374 /* Need to abort here */
5380 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5381 0, M_DONTWAIT, 1, MT_DATA);
5383 struct sctp_paramhdr *ph;
5386 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5388 ph = mtod(oper, struct sctp_paramhdr *);
5389 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5390 ph->param_length = htons(SCTP_BUF_LEN(oper));
5391 ippp = (uint32_t *) (ph + 1);
5392 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5394 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5395 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5398 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5399 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5400 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5402 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5403 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5404 sctp_stop_timers_for_shutdown(stcb);
5405 sctp_send_shutdown(stcb,
5406 stcb->asoc.primary_destination);
5407 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5408 stcb->sctp_ep, stcb, asoc->primary_destination);
5409 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5410 stcb->sctp_ep, stcb, asoc->primary_destination);
5413 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5414 (asoc->stream_queue_cnt == 0)) {
5415 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5418 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5419 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5420 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5421 sctp_send_shutdown_ack(stcb,
5422 stcb->asoc.primary_destination);
5424 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5425 stcb->sctp_ep, stcb, asoc->primary_destination);
5430 * Now here we are going to recycle net_ack for a different use...
5433 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5438 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5439 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5440 * automatically ensure that.
5442 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5443 this_sack_lowest_newack = cum_ack;
/* Mark (strike) chunks missed by the gap reports for fast retransmit. */
5446 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5447 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5449 /* JRS - Use the congestion control given in the CC module */
5450 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5452 /******************************************************************
5453 * Here we do the stuff with ECN Nonce checking.
5454 * We basically check to see if the nonce sum flag was incorrect
5455 * or if resynchronization needs to be done. Also if we catch a
5456 * misbehaving receiver we give him the kick.
5457 ******************************************************************/
5459 if (asoc->ecn_nonce_allowed) {
5460 if (asoc->nonce_sum_check) {
5461 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5462 if (asoc->nonce_wait_for_ecne == 0) {
5463 struct sctp_tmit_chunk *lchk;
5465 lchk = TAILQ_FIRST(&asoc->send_queue);
5466 asoc->nonce_wait_for_ecne = 1;
5468 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5470 asoc->nonce_wait_tsn = asoc->sending_seq;
5473 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5474 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5476 * Misbehaving peer. We need
5477 * to react to this guy
5479 asoc->ecn_allowed = 0;
5480 asoc->ecn_nonce_allowed = 0;
5485 /* See if Resynchronization Possible */
5486 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5487 asoc->nonce_sum_check = 1;
5489 * now we must calculate what the base is.
5490 * We do this based on two things, we know
5491 * the total's for all the segments
5492 * gap-acked in the SACK, its stored in
5493 * ecn_seg_sums. We also know the SACK's
5494 * nonce sum, its in nonce_sum_flag. So we
5495 * can build a truth table to back-calculate
5497 * asoc->nonce_sum_expect_base:
5499 * SACK-flag-Value Seg-Sums Base 0 0 0
5503 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5507 /* Now are we exiting loss recovery ? */
5508 if (will_exit_fast_recovery) {
5509 /* Ok, we must exit fast recovery */
5510 asoc->fast_retran_loss_recovery = 0;
5512 if ((asoc->sat_t3_loss_recovery) &&
5513 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5515 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5516 /* end satellite t3 loss recovery */
5517 asoc->sat_t3_loss_recovery = 0;
5522 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5523 if (net->will_exit_fast_recovery) {
5524 /* Ok, we must exit fast recovery */
5525 net->fast_retran_loss_recovery = 0;
5529 /* Adjust and set the new rwnd value */
5530 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5531 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5532 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5534 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5535 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5536 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5537 /* SWS sender side engages */
5538 asoc->peers_rwnd = 0;
/* A grown rwnd means window-probe data may now be deliverable. */
5540 if (asoc->peers_rwnd > old_rwnd) {
5541 win_probe_recovery = 1;
5544 * Now we must setup so we have a timer up for anyone with
5550 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5551 if (win_probe_recovery && (net->window_probe)) {
5552 win_probe_recovered = 1;
5554 * Find first chunk that was used with
5555 * window probe and clear the event. Put
5556 * it back into the send queue as if has
5559 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5560 if (tp1->window_probe) {
5561 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5566 if (net->flight_size) {
5568 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5569 stcb->sctp_ep, stcb, net);
5570 if (net->window_probe) {
5573 if (net->window_probe) {
5575 * In window probes we must assure a timer
5576 * is still running there
5579 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5580 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5581 stcb->sctp_ep, stcb, net);
5584 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5585 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5587 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5589 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5590 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5591 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5592 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5593 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * Flight-size audit: with chunks still queued but no retransmits or
 * window probes outstanding, rebuild per-net and total flight counts
 * from the sent queue if sctp_fs_audit() finds them inconsistent.
 */
5599 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5600 (asoc->sent_queue_retran_cnt == 0) &&
5601 (win_probe_recovered == 0) &&
5604 * huh, this should not happen unless all packets are
5605 * PR-SCTP and marked to skip of course.
5607 if (sctp_fs_audit(asoc)) {
5608 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5609 net->flight_size = 0;
5611 asoc->total_flight = 0;
5612 asoc->total_flight_count = 0;
5613 asoc->sent_queue_retran_cnt = 0;
5614 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5615 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5616 sctp_flight_size_increase(tp1);
5617 sctp_total_flight_increase(stcb, tp1);
5618 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5619 asoc->sent_queue_retran_cnt++;
/*
 * PR-SCTP (RFC 3758) C1-C3: advance advancedPeerAckPoint past the
 * cum-ack, try to move it further over abandoned chunks, and emit a
 * FORWARD-TSN when it moved (also fast-retransmitting FWD-TSNs that
 * appear lost via the fwd_tsn_cnt heuristic).
 */
5626 /* Fix up the a-p-a-p for future PR-SCTP sends */
5627 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5628 asoc->advanced_peer_ack_point = cum_ack;
5630 /* C2. try to further move advancedPeerAckPoint ahead */
5631 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5632 struct sctp_tmit_chunk *lchk;
5633 uint32_t old_adv_peer_ack_point;
5635 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5636 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5637 /* C3. See if we need to send a Fwd-TSN */
5638 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5641 * ISSUE with ECN, see FWD-TSN processing for notes
5642 * on issues that will occur when the ECN NONCE
5643 * stuff is put into SCTP for cross checking.
5645 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5646 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5647 0xee, cum_ack, asoc->advanced_peer_ack_point,
5648 old_adv_peer_ack_point);
5650 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5652 send_forward_tsn(stcb, asoc);
5654 * ECN Nonce: Disable Nonce Sum check when
5655 * FWD TSN is sent and store resync tsn
5657 asoc->nonce_sum_check = 0;
5658 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5660 /* try to FR fwd-tsn's that get lost too */
5661 lchk->rec.data.fwd_tsn_cnt++;
5662 if (lchk->rec.data.fwd_tsn_cnt > 3) {
5663 send_forward_tsn(stcb, asoc);
5664 lchk->rec.data.fwd_tsn_cnt = 0;
5669 /* Assure a timer is up */
5670 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5671 stcb->sctp_ep, stcb, lchk->whoTo);
5674 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5675 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5677 stcb->asoc.peers_rwnd,
5678 stcb->asoc.total_flight,
5679 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried by a received SHUTDOWN chunk by
 * funneling it through the express SACK path.  a_rwnd is synthesized as
 * peers_rwnd + total_flight so that, per the inline comment, the peer's
 * advertised window is effectively unchanged by this call.
 *
 * NOTE(review): this block is an elided extraction (embedded source line
 * numbers skip, e.g. the opening brace around original line 5686 is
 * missing); the visible statements are documented as-is.
 */
5684 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5685 struct sctp_nets *netp, int *abort_flag)
5688 uint32_t cum_ack, a_rwnd;
5690 cum_ack = ntohl(cp->cumulative_tsn_ack);
5691 /* Arrange so a_rwnd does NOT change */
5692 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5694 /* Now call the express sack handling */
5695 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
/*
 * After PR-SCTP advances a stream's delivery point (FORWARD-TSN), flush
 * the stream's reorder queue in two passes: first deliver every queued
 * message whose SSN is at or before last_sequence_delivered, then resume
 * normal in-order delivery for messages that are now consecutive.  The
 * EY-tagged sections additionally mark delivered TSNs in the NR-SACK
 * mapping array when nr_sack support is negotiated.
 *
 * NOTE(review): this block is an elided extraction -- embedded source
 * line numbers skip (e.g. 5703 -> 5706), so loop headers, braces and
 * some statements are missing; do not restructure until re-synchronized
 * with the upstream sctp_indata.c.
 */
5699 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5700 struct sctp_stream_in *strmin)
5702 struct sctp_queued_to_read *ctl, *nctl;
5703 struct sctp_association *asoc;
5706 /* EY -used to calculate nr_gap information */
5707 uint32_t nr_tsn, nr_gap;
5710 tt = strmin->last_sequence_delivered;
5712 * First deliver anything prior to and including the stream no that
/* Pass 1: deliver everything with SSN <= tt (the forwarded-to point). */
5715 ctl = TAILQ_FIRST(&strmin->inqueue);
5717 nctl = TAILQ_NEXT(ctl, next);
5718 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5719 (tt == ctl->sinfo_ssn)) {
5720 /* this is deliverable now */
5721 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5722 /* subtract pending on streams */
5723 asoc->size_on_all_streams -= ctl->length;
5724 sctp_ucount_decr(asoc->cnt_on_all_streams);
5725 /* deliver it to at least the delivery-q */
5726 if (stcb->sctp_socket) {
5727 /* EY need the tsn info for calculating nr */
5728 nr_tsn = ctl->sinfo_tsn;
5729 sctp_add_to_readq(stcb->sctp_ep, stcb,
5731 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5733 * EY this is the chunk that should be
5734 * tagged nr gapped calculate the gap and
5735 * such then tag this TSN nr
5736 * chk->rec.data.TSN_seq
/* NR-SACK bookkeeping: record the delivered TSN in the nr mapping array. */
5738 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5740 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
5741 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5742 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5744 * EY These should never
5745 * happen- explained before
5748 SCTP_TCB_LOCK_ASSERT(stcb);
5749 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5750 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
5751 if (compare_with_wrap(nr_tsn,
5752 asoc->highest_tsn_inside_nr_map,
5754 asoc->highest_tsn_inside_nr_map = nr_tsn;
5756 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
5759 * sctp_kick_prsctp_reorder_q
5760 * ueue(7): Something wrong,
5761 * the TSN to be tagged"
5762 * "\nas NR is not even in
5763 * the mapping_array, or map
5768 * EY - not %100 sure about
5769 * the lock thing, don't
5770 * think its required
5773 * SCTP_TCB_LOCK_ASSERT(stcb)
5778 * printf("\nCalculating an
5779 * nr_gap!!\nmapping_array_si
5781 * nr_mapping_array_size =
5782 * %d" "\nmapping_array_base
5784 * nr_mapping_array_base =
5785 * %d\nhighest_tsn_inside_map
5787 * "highest_tsn_inside_nr_map
5788 * = %d\nTSN = %d nr_gap =
5789 * %d",asoc->mapping_array_si
5791 * asoc->nr_mapping_array_siz
5793 * asoc->mapping_array_base_t
5795 * asoc->nr_mapping_array_bas
5797 * asoc->highest_tsn_inside_m
5799 * asoc->highest_tsn_inside_n
5800 * r_map,tsn,nr_gap);
5806 /* no more delivery now. */
5812 * now we must deliver things in queue the normal way if any are
/* Pass 2: normal in-order delivery of now-consecutive SSNs (tt, tt+1, ...). */
5815 tt = strmin->last_sequence_delivered + 1;
5816 ctl = TAILQ_FIRST(&strmin->inqueue);
5818 nctl = TAILQ_NEXT(ctl, next);
5819 if (tt == ctl->sinfo_ssn) {
5820 /* this is deliverable now */
5821 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5822 /* subtract pending on streams */
5823 asoc->size_on_all_streams -= ctl->length;
5824 sctp_ucount_decr(asoc->cnt_on_all_streams);
5825 /* deliver it to at least the delivery-q */
5826 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5827 if (stcb->sctp_socket) {
5829 nr_tsn = ctl->sinfo_tsn;
5830 sctp_add_to_readq(stcb->sctp_ep, stcb,
5832 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5834 * EY this is the chunk that should be
5835 * tagged nr gapped calculate the gap and
5836 * such then tag this TSN nr
5837 * chk->rec.data.TSN_seq
5839 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5840 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
5841 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5842 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5844 * EY These should never
5845 * happen, explained before
5848 SCTP_TCB_LOCK_ASSERT(stcb);
5849 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5850 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
5851 if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
5853 asoc->highest_tsn_inside_nr_map = nr_tsn;
5855 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
5858 * sctp_kick_prsctp_reorder_q
5859 * ueue(8): Something wrong,
5860 * the TSN to be tagged"
5861 * "\nas NR is not even in
5862 * the mapping_array, or map
5867 * EY - not %100 sure about
5868 * the lock thing, don't
5869 * think its required
5872 * SCTP_TCB_LOCK_ASSERT(stcb)
5877 * printf("\nCalculating an
5878 * nr_gap!!\nmapping_array_si
5880 * nr_mapping_array_size =
5881 * %d" "\nmapping_array_base
5883 * nr_mapping_array_base =
5884 * %d\nhighest_tsn_inside_map
5886 * "highest_tsn_inside_nr_map
5887 * = %d\nTSN = %d nr_gap =
5888 * %d",asoc->mapping_array_si
5890 * asoc->nr_mapping_array_siz
5892 * asoc->mapping_array_base_t
5894 * asoc->nr_mapping_array_bas
5896 * asoc->highest_tsn_inside_m
5898 * asoc->highest_tsn_inside_n
5899 * r_map,tsn,nr_gap);
5904 tt = strmin->last_sequence_delivered + 1;
/*
 * sctp_flush_reassm_for_str_seq():
 * Purge from the association's reassembly queue every fragment that
 * belongs to the given (stream, seq) pair, typically because a
 * FORWARD-TSN told us the peer has abandoned that message.  For
 * ordered data it also pulls the stream's last_sequence_delivered
 * forward so the skipped message cannot stall in-order delivery.
 *
 * NOTE(review): this listing has gaps -- the embedded original line
 * numbers jump (5916, 5918, 5921, 5928, 5930, ... are absent), so the
 * enclosing loop header and several closing braces are not visible
 * here.  Comments below describe only what the visible lines show;
 * confirm control flow against the full source.
 */
5913 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5914 struct sctp_association *asoc,
5915 uint16_t stream, uint16_t seq)
5917 struct sctp_tmit_chunk *chk, *at;
5919 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5920 /* For each one on here see if we need to toss it */
5922 * For now large messages held on the reasmqueue that are
5923 * complete will be tossed too. We could in theory do more
5924 * work to spin through and stop after dumping one msg aka
5925 * seeing the start of a new msg at the head, and call the
5926 * delivery function... to see if it can be delivered... But
5927 * for now we just dump everything on the queue.
5929 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* 'at' caches the next element so the walk survives removing 'chk'. */
5931 at = TAILQ_NEXT(chk, sctp_next);
5933 * Do not toss it if on a different stream or marked
5934 * for unordered delivery in which case the stream
5935 * sequence number has no meaning.
5937 if ((chk->rec.data.stream_number != stream) ||
5938 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5942 if (chk->rec.data.stream_seq == seq) {
5943 /* It needs to be tossed */
5944 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/*
 * Record the newest TSN being discarded plus its PD-API
 * (partial-delivery) stream state, so later accounting knows
 * how far delivery has conceptually advanced.
 */
5945 if (compare_with_wrap(chk->rec.data.TSN_seq,
5946 asoc->tsn_last_delivered, MAX_TSN)) {
5947 asoc->tsn_last_delivered =
5948 chk->rec.data.TSN_seq;
5949 asoc->str_of_pdapi =
5950 chk->rec.data.stream_number;
5951 asoc->ssn_of_pdapi =
5952 chk->rec.data.stream_seq;
5953 asoc->fragment_flags =
5954 chk->rec.data.rcv_flags;
/* Account for the removed chunk in the reassembly-queue size/count. */
5956 asoc->size_on_reasm_queue -= chk->send_size;
5957 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5959 /* Clear up any stream problem */
5960 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5961 SCTP_DATA_UNORDERED &&
5962 (compare_with_wrap(chk->rec.data.stream_seq,
5963 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5966 * We must dump forward this streams
5967 * sequence number if the chunk is
5968 * not unordered that is being
5969 * skipped. There is a chance that
5970 * if the peer does not include the
5971 * last fragment in its FWD-TSN we
5972 * WILL have a problem here since
5973 * you would have a partial chunk in
5974 * queue that may not be
5975 * deliverable. Also if a Partial
5976 * delivery API as started the user
5977 * may get a partial chunk. The next
5978 * read returning a new chunk...
5979 * really ugly but I see no way
5980 * around it! Maybe a notify??
5982 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5983 chk->rec.data.stream_seq;
/* Release the chunk's data mbufs, then the chunk descriptor itself. */
5986 sctp_m_freem(chk->data);
5989 sctp_free_a_chunk(stcb, chk);
/*
 * NOTE(review): this else-if (stream_seq past the purge target) and
 * its explanatory comment continue beyond what this listing shows.
 */
5990 } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5992 * If the stream_seq is > than the purging
/*
 * sctp_handle_forward_tsn():
 * Receiver-side processing of a PR-SCTP FORWARD-TSN chunk.  The
 * visible steps, mirroring the banner comments below, are:
 *   1) advance the local cumulative TSN / mapping arrays to the
 *      peer's new_cumulative_tsn, aborting the association (protocol
 *      violation) if the jump is beyond what our advertised rwnd
 *      could ever justify;
 *   2) flush the reassembly queue of every chunk at or below the new
 *      cumulative TSN;
 *   3) walk the per-stream (stream, seq) entries carried in the
 *      chunk, aborting any in-progress partial delivery for a skipped
 *      message and kicking the PR-stream re-order queues.
 *
 * NOTE(review): this listing is incomplete -- embedded line numbers
 * jump, so some declarations (e.g. 'oper', 'ippp', 'st', 'str_seq'),
 * loop headers and closing braces are not visible.  Comments added
 * here describe only what the visible lines show.
 */
6004 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
6005 struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
6008 * ISSUES that MUST be fixed for ECN! When we are the sender of the
6009 * forward TSN, when the SACK comes back that acknowledges the
6010 * FWD-TSN we must reset the NONCE sum to match correctly. This will
6011 * get quite tricky since we may have sent more data interveneing
6012 * and must carefully account for what the SACK says on the nonce
6013 * and any gaps that are reported. This work will NOT be done here,
6014 * but I note it here since it is really related to PR-SCTP and
6018 /* The pr-sctp fwd tsn */
6020 * here we will perform all the data receiver side steps for
6021 * processing FwdTSN, as required in by pr-sctp draft:
6023 * Assume we get FwdTSN(x):
6025 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
6026 * others we have 3) examine and update re-ordering queue on
6027 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
6028 * report where we are.
6030 struct sctp_association *asoc;
6031 uint32_t new_cum_tsn, gap;
6032 unsigned int i, fwd_sz, cumack_set_flag, m_size;
6034 struct sctp_stream_in *strm;
6035 struct sctp_tmit_chunk *chk, *at;
6036 struct sctp_queued_to_read *ctl, *sv;
6038 cumack_set_flag = 0;
/* Sanity-check the chunk length before touching the payload. */
6040 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
6041 SCTPDBG(SCTP_DEBUG_INDATA1,
6042 "Bad size too small/big fwd-tsn\n");
/* m_size: number of TSNs representable in the mapping bit array. */
6045 m_size = (stcb->asoc.mapping_array_size << 3);
6046 /*************************************************************/
6047 /* 1. Here we update local cumTSN and shift the bitmap array */
6048 /*************************************************************/
6049 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
6051 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
6052 asoc->cumulative_tsn == new_cum_tsn) {
6053 /* Already got there ... */
6056 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
6058 asoc->highest_tsn_inside_map = new_cum_tsn;
6059 /* EY nr_mapping_array version of the above */
6061 * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
6062 * asoc->peer_supports_nr_sack)
6064 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6065 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6066 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6070 * now we know the new TSN is more advanced, let's find the actual
6073 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
/* The new cum-TSN lies beyond the current mapping array. */
6074 if (gap >= m_size) {
6075 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6076 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/*
 * Gap larger than the receive-window space we ever advertised:
 * treat the peer as hostile and abort with PROTOCOL_VIOLATION,
 * embedding location code, our map high-water mark and the
 * offending cum-TSN in the error cause for diagnosis.
 */
6078 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
6082 * out of range (of single byte chunks in the rwnd I
6083 * give out). This must be an attacker.
6086 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
6087 0, M_DONTWAIT, 1, MT_DATA);
6089 struct sctp_paramhdr *ph;
6092 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6093 (sizeof(uint32_t) * 3);
6094 ph = mtod(oper, struct sctp_paramhdr *);
6095 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6096 ph->param_length = htons(SCTP_BUF_LEN(oper));
6097 ippp = (uint32_t *) (ph + 1);
6098 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
6100 *ippp = asoc->highest_tsn_inside_map;
6102 *ippp = new_cum_tsn;
6104 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
6105 sctp_abort_an_association(stcb->sctp_ep, stcb,
6106 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* Map overflow but plausible: restart the mapping arrays at the new base. */
6109 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
6110 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
6111 cumack_set_flag = 1;
6112 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
6113 asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
6114 /* EY - nr_sack: nr_mapping_array version of the above */
6115 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
6116 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
6117 asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
6118 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6119 if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
6121 * printf("IN sctp_handle_forward_tsn:
6122 * Something is wrong the size of" "map and
6123 * nr_map should be equal!")
6127 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6128 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6130 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
/*
 * Gap fits inside the current map: mark every TSN up to the gap as
 * received (in the NR map too, when NR-SACK is on and draining is
 * disabled), then let sctp_sack_check() slide the map forward.
 */
6132 SCTP_TCB_LOCK_ASSERT(stcb);
6133 for (i = 0; i <= gap; i++) {
6134 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack
6135 && SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
6136 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
6138 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
6142 * Now after marking all, slide thing forward but no sack
6145 sctp_sack_check(stcb, 0, 0, abort_flag);
6149 /*************************************************************/
6150 /* 2. Clear up re-assembly queue */
6151 /*************************************************************/
6153 * First service it if pd-api is up, just in case we can progress it
6156 if (asoc->fragmented_delivery_inprogress) {
6157 sctp_service_reassembly(stcb, asoc);
6159 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
6160 /* For each one on here see if we need to toss it */
6162 * For now large messages held on the reasmqueue that are
6163 * complete will be tossed too. We could in theory do more
6164 * work to spin through and stop after dumping one msg aka
6165 * seeing the start of a new msg at the head, and call the
6166 * delivery function... to see if it can be delivered... But
6167 * for now we just dump everything on the queue.
6169 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* 'at' caches the successor so removal of 'chk' is safe. */
6171 at = TAILQ_NEXT(chk, sctp_next);
/* Toss everything at or below the new cumulative TSN. */
6172 if ((compare_with_wrap(new_cum_tsn,
6173 chk->rec.data.TSN_seq, MAX_TSN)) ||
6174 (new_cum_tsn == chk->rec.data.TSN_seq)) {
6175 /* It needs to be tossed */
6176 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
6177 if (compare_with_wrap(chk->rec.data.TSN_seq,
6178 asoc->tsn_last_delivered, MAX_TSN)) {
6179 asoc->tsn_last_delivered =
6180 chk->rec.data.TSN_seq;
6181 asoc->str_of_pdapi =
6182 chk->rec.data.stream_number;
6183 asoc->ssn_of_pdapi =
6184 chk->rec.data.stream_seq;
6185 asoc->fragment_flags =
6186 chk->rec.data.rcv_flags;
6188 asoc->size_on_reasm_queue -= chk->send_size;
6189 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
6191 /* Clear up any stream problem */
6192 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
6193 SCTP_DATA_UNORDERED &&
6194 (compare_with_wrap(chk->rec.data.stream_seq,
6195 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6198 * We must dump forward this streams
6199 * sequence number if the chunk is
6200 * not unordered that is being
6201 * skipped. There is a chance that
6202 * if the peer does not include the
6203 * last fragment in its FWD-TSN we
6204 * WILL have a problem here since
6205 * you would have a partial chunk in
6206 * queue that may not be
6207 * deliverable. Also if a Partial
6208 * delivery API as started the user
6209 * may get a partial chunk. The next
6210 * read returning a new chunk...
6211 * really ugly but I see no way
6212 * around it! Maybe a notify??
6214 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6215 chk->rec.data.stream_seq;
6218 sctp_m_freem(chk->data);
6221 sctp_free_a_chunk(stcb, chk);
6224 * Ok we have gone beyond the end of the
6232 /*******************************************************/
6233 /* 3. Update the PR-stream re-ordering queues and fix */
6234 /* delivery issues as needed. */
6235 /*******************************************************/
/* Remaining bytes after the fixed header are (stream, seq) entries. */
6236 fwd_sz -= sizeof(*fwd);
6239 unsigned int num_str;
6240 struct sctp_strseq *stseq, strseqbuf;
6242 offset += sizeof(*fwd);
6244 SCTP_INP_READ_LOCK(stcb->sctp_ep);
6245 num_str = fwd_sz / sizeof(struct sctp_strseq);
6246 for (i = 0; i < num_str; i++) {
/* Pull each entry out of the mbuf chain (copied into strseqbuf if split). */
6249 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
6250 sizeof(struct sctp_strseq),
6251 (uint8_t *) & strseqbuf);
6252 offset += sizeof(struct sctp_strseq);
6253 if (stseq == NULL) {
/* Convert stream and sequence to host byte order in place. */
6257 st = ntohs(stseq->stream);
6259 st = ntohs(stseq->sequence);
6260 stseq->sequence = st;
6265 * Ok we now look for the stream/seq on the read
6266 * queue where its not all delivered. If we find it
6267 * we transmute the read entry into a PDI_ABORTED.
6269 if (stseq->stream >= asoc->streamincnt) {
6270 /* screwed up streams, stop! */
6273 if ((asoc->str_of_pdapi == stseq->stream) &&
6274 (asoc->ssn_of_pdapi == stseq->sequence)) {
6276 * If this is the one we were partially
6277 * delivering now then we no longer are.
6278 * Note this will change with the reassembly
6281 asoc->fragmented_delivery_inprogress = 0;
6283 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
/*
 * Any matching partially-read entry on the read queue is
 * converted into a PARTIAL_DELIVERY_ABORTED notification
 * (control_pdapi is swapped in temporarily for the notify call).
 */
6284 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
6285 if ((ctl->sinfo_stream == stseq->stream) &&
6286 (ctl->sinfo_ssn == stseq->sequence)) {
6287 str_seq = (stseq->stream << 16) | stseq->sequence;
6289 ctl->pdapi_aborted = 1;
6290 sv = stcb->asoc.control_pdapi;
6291 stcb->asoc.control_pdapi = ctl;
6292 sctp_notify_partial_delivery_indication(stcb,
6293 SCTP_PARTIAL_DELIVERY_ABORTED,
6296 stcb->asoc.control_pdapi = sv;
6298 } else if ((ctl->sinfo_stream == stseq->stream) &&
6299 (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
6300 /* We are past our victim SSN */
6304 strm = &asoc->strmin[stseq->stream];
6305 if (compare_with_wrap(stseq->sequence,
6306 strm->last_sequence_delivered, MAX_SEQ)) {
6307 /* Update the sequence number */
6308 strm->last_sequence_delivered =
6311 /* now kick the stream the new way */
6312 /* sa_ignore NO_NULL_CHK */
6313 sctp_kick_prsctp_reorder_queue(stcb, strm);
6315 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
6317 if (TAILQ_FIRST(&asoc->reasmqueue)) {
6318 /* now lets kick out and check for more fragmented delivery */
6319 /* sa_ignore NO_NULL_CHK */
6320 sctp_deliver_reasm_check(stcb, &stcb->asoc);
6324 /* EY: functionally identical to sctp_express_handle_sack; duplicated only to keep the nr_sack naming convention */
/*
 * sctp_express_handle_nr_sack():
 * Fast-path handler for an incoming NR-SACK carrying only a
 * cumulative ack (no gap-ack blocks).  Visible work, in order:
 * window-update handling when the cum-ack equals the last acked TSN;
 * sanity check that the peer is not acking beyond what we ever sent
 * (PROTOCOL_VIOLATION abort otherwise, when sctp_strict_sacks is on);
 * walking the sent queue to retire newly cum-acked chunks (flight
 * size, net_ack, RTO sampling on first transmissions, CUCv2
 * pseudo-cumack bookkeeping, freeing the chunk); waking the sending
 * socket; CC-module cwnd update; ECN nonce resync; peer-rwnd
 * recomputation; per-net T3-rxt timer and window-probe management;
 * flight-size audit; and finally shutdown progression once both send
 * queues are empty.
 *
 * NOTE(review): this listing is incomplete -- embedded line numbers
 * jump, so several declarations (e.g. 'old_rwnd', 'send_s', 'oper',
 * 'ippp', 'to_ticks', 'so'), loop headers and closing braces are not
 * visible.  Added comments describe only what the visible lines show.
 */
6326 sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
6327 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
6329 struct sctp_nets *net;
6330 struct sctp_association *asoc;
6331 struct sctp_tmit_chunk *tp1, *tp2;
6333 int win_probe_recovery = 0;
6334 int win_probe_recovered = 0;
6335 int j, done_once = 0;
6337 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
6338 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
6339 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
6341 SCTP_TCB_LOCK_ASSERT(stcb);
6342 #ifdef SCTP_ASOCLOG_OF_TSNS
6343 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
6344 stcb->asoc.cumack_log_at++;
6345 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
6346 stcb->asoc.cumack_log_at = 0;
6350 old_rwnd = asoc->peers_rwnd;
/* Old (already-passed) cum-ack: nothing to retire. */
6351 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
6354 } else if (asoc->last_acked_seq == cumack) {
6355 /* Window update sack */
6356 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6357 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6358 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6359 /* SWS sender side engages */
6360 asoc->peers_rwnd = 0;
6362 if (asoc->peers_rwnd > old_rwnd) {
6367 /* First setup for CC stuff */
6368 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6369 net->prev_cwnd = net->cwnd;
6374 * CMT: Reset CUC and Fast recovery algo variables before
6377 net->new_pseudo_cumack = 0;
6378 net->will_exit_fast_recovery = 0;
/*
 * Strict-SACK check: a cum-ack at or beyond our next sending TSN
 * means the peer acked data we never sent -- abort the association.
 */
6380 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
6383 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
6384 tp1 = TAILQ_LAST(&asoc->sent_queue,
6385 sctpchunk_listhead);
6386 send_s = tp1->rec.data.TSN_seq + 1;
6388 send_s = asoc->sending_seq;
6390 if ((cumack == send_s) ||
6391 compare_with_wrap(cumack, send_s, MAX_TSN)) {
6397 panic("Impossible sack 1");
6401 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6402 0, M_DONTWAIT, 1, MT_DATA);
6404 struct sctp_paramhdr *ph;
6407 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6409 ph = mtod(oper, struct sctp_paramhdr *);
6410 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6411 ph->param_length = htons(SCTP_BUF_LEN(oper));
6412 ippp = (uint32_t *) (ph + 1);
6413 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
6415 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
6416 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* A valid SACK arrived: reset error counters and record progress. */
6421 asoc->this_sack_highest_gap = cumack;
6422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6423 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6424 stcb->asoc.overall_error_count,
6426 SCTP_FROM_SCTP_INDATA,
6429 stcb->asoc.overall_error_count = 0;
6430 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
6431 /* process the new consecutive TSN first */
6432 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6434 tp2 = TAILQ_NEXT(tp1, sctp_next);
6435 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
6437 cumack == tp1->rec.data.TSN_seq) {
6438 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
6439 printf("Warning, an unsent is now acked?\n");
6442 * ECN Nonce: Add the nonce to the sender's
6445 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
6446 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
6448 * If it is less than ACKED, it is
6449 * now no-longer in flight. Higher
6450 * values may occur during marking
6452 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6453 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6454 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
6455 tp1->whoTo->flight_size,
6457 (uintptr_t) tp1->whoTo,
6458 tp1->rec.data.TSN_seq);
6460 sctp_flight_size_decrease(tp1);
6461 /* sa_ignore NO_NULL_CHK */
6462 sctp_total_flight_decrease(stcb, tp1);
6464 tp1->whoTo->net_ack += tp1->send_size;
6465 if (tp1->snd_count < 2) {
6467 * True non-retransmited
6470 tp1->whoTo->net_ack2 +=
6473 /* update RTO too? */
/* RTT sample only from a chunk that was never retransmitted (Karn). */
6480 sctp_calculate_rto(stcb,
6482 &tp1->sent_rcv_time,
6483 sctp_align_safe_nocopy);
6488 * CMT: CUCv2 algorithm. From the
6489 * cumack'd TSNs, for each TSN being
6490 * acked for the first time, set the
6491 * following variables for the
6492 * corresp destination.
6493 * new_pseudo_cumack will trigger a
6495 * find_(rtx_)pseudo_cumack will
6496 * trigger search for the next
6497 * expected (rtx-)pseudo-cumack.
6499 tp1->whoTo->new_pseudo_cumack = 1;
6500 tp1->whoTo->find_pseudo_cumack = 1;
6501 tp1->whoTo->find_rtx_pseudo_cumack = 1;
6503 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6504 /* sa_ignore NO_NULL_CHK */
6505 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6508 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6509 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6511 if (tp1->rec.data.chunk_was_revoked) {
6512 /* deflate the cwnd */
6513 tp1->whoTo->cwnd -= tp1->book_size;
6514 tp1->rec.data.chunk_was_revoked = 0;
/* Chunk fully acked: unlink it and release its buffers. */
6516 tp1->sent = SCTP_DATAGRAM_ACKED;
6517 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
6519 /* sa_ignore NO_NULL_CHK */
6520 sctp_free_bufspace(stcb, asoc, tp1, 1);
6521 sctp_m_freem(tp1->data);
6523 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6524 sctp_log_sack(asoc->last_acked_seq,
6526 tp1->rec.data.TSN_seq,
6529 SCTP_LOG_FREE_SENT);
6532 asoc->sent_queue_cnt--;
6533 sctp_free_a_chunk(stcb, tp1);
/* Acked data freed send-buffer space: wake the sending socket. */
6541 /* sa_ignore NO_NULL_CHK */
6542 if (stcb->sctp_socket) {
6543 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6548 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
6549 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6550 /* sa_ignore NO_NULL_CHK */
6551 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
6553 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/*
 * Apple/lock-testing builds take the socket lock; the TCB is
 * unlocked meanwhile, so re-check the association still exists.
 */
6554 so = SCTP_INP_SO(stcb->sctp_ep);
6555 atomic_add_int(&stcb->asoc.refcnt, 1);
6556 SCTP_TCB_UNLOCK(stcb);
6557 SCTP_SOCKET_LOCK(so, 1);
6558 SCTP_TCB_LOCK(stcb);
6559 atomic_subtract_int(&stcb->asoc.refcnt, 1);
6560 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6561 /* assoc was freed while we were unlocked */
6562 SCTP_SOCKET_UNLOCK(so, 1);
6566 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
6567 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6568 SCTP_SOCKET_UNLOCK(so, 1);
6571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6572 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
6576 /* JRS - Use the congestion control given in the CC module */
6577 if (asoc->last_acked_seq != cumack)
6578 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
6580 asoc->last_acked_seq = cumack;
6582 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6583 /* nothing left in-flight */
6584 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6585 net->flight_size = 0;
6586 net->partial_bytes_acked = 0;
6588 asoc->total_flight = 0;
6589 asoc->total_flight_count = 0;
6591 /* Fix up the a-p-a-p for future PR-SCTP sends */
6592 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
6593 asoc->advanced_peer_ack_point = cumack;
6595 /* ECN Nonce updates */
6596 if (asoc->ecn_nonce_allowed) {
6597 if (asoc->nonce_sum_check) {
/*
 * Nonce-sum mismatch: remember the first unacked TSN and wait; if
 * the peer's cum-ack later passes it without an ECNE explaining
 * the mismatch, treat the peer as misbehaving and disable ECN.
 */
6598 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
6599 if (asoc->nonce_wait_for_ecne == 0) {
6600 struct sctp_tmit_chunk *lchk;
6602 lchk = TAILQ_FIRST(&asoc->send_queue);
6603 asoc->nonce_wait_for_ecne = 1;
6605 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
6607 asoc->nonce_wait_tsn = asoc->sending_seq;
6610 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
6611 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
6613 * Misbehaving peer. We need
6614 * to react to this guy
6616 asoc->ecn_allowed = 0;
6617 asoc->ecn_nonce_allowed = 0;
6622 /* See if Resynchronization Possible */
6623 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
6624 asoc->nonce_sum_check = 1;
6626 * now we must calculate what the base is.
6627 * We do this based on two things, we know
6628 * the total's for all the segments
6629 * gap-acked in the SACK (none), We also
6630 * know the SACK's nonce sum, its in
6631 * nonce_sum_flag. So we can build a truth
6632 * table to back-calculate the new value of
6633 * asoc->nonce_sum_expect_base:
6635 * SACK-flag-Value Seg-Sums Base 0 0 0
6638 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute peer's receive window less our outstanding flight. */
6643 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6644 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6645 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6646 /* SWS sender side engages */
6647 asoc->peers_rwnd = 0;
6649 if (asoc->peers_rwnd > old_rwnd) {
6650 win_probe_recovery = 1;
6652 /* Now assure a timer where data is queued at */
6655 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6658 if (win_probe_recovery && (net->window_probe)) {
6659 win_probe_recovered = 1;
6661 * Find first chunk that was used with window probe
6662 * and clear the sent
6664 /* sa_ignore FREED_MEMORY */
6665 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6666 if (tp1->window_probe) {
6667 /* move back to data send queue */
6668 sctp_window_probe_recovery(stcb, asoc, net, tp1);
6673 if (net->RTO == 0) {
6674 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
6676 to_ticks = MSEC_TO_TICKS(net->RTO);
/* Data still in flight on this net: (re)arm the retransmit timer. */
6678 if (net->flight_size) {
6681 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6682 sctp_timeout_handler, &net->rxt_timer);
6683 if (net->window_probe) {
6684 net->window_probe = 0;
6687 if (net->window_probe) {
6689 * In window probes we must assure a timer
6690 * is still running there
6692 net->window_probe = 0;
6693 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6694 sctp_timeout_handler, &net->rxt_timer);
6695 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6696 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
6698 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
6700 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
6701 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6702 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
6703 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
6704 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * Flight-size audit: with chunks still on the sent queue but no
 * retransmissions or window probes pending, rebuild the flight
 * counters from the queue contents.
 */
6710 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
6711 (asoc->sent_queue_retran_cnt == 0) &&
6712 (win_probe_recovered == 0) &&
6715 * huh, this should not happen unless all packets are
6716 * PR-SCTP and marked to skip of course.
6718 if (sctp_fs_audit(asoc)) {
6719 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6720 net->flight_size = 0;
6722 asoc->total_flight = 0;
6723 asoc->total_flight_count = 0;
6724 asoc->sent_queue_retran_cnt = 0;
6725 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6726 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6727 sctp_flight_size_increase(tp1);
6728 sctp_total_flight_increase(stcb, tp1);
6729 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6730 asoc->sent_queue_retran_cnt++;
6737 /**********************************/
6738 /* Now what about shutdown issues */
6739 /**********************************/
6740 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
6741 /* nothing left on sendqueue.. consider done */
6743 if ((asoc->stream_queue_cnt == 1) &&
6744 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6745 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
6746 (asoc->locked_on_sending)
6748 struct sctp_stream_queue_pending *sp;
6751 * I may be in a state where we got all across.. but
6752 * cannot write more due to a shutdown... we abort
6753 * since the user did not indicate EOR in this case.
6754 * The sp will be cleaned during free of the asoc.
6756 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
6758 if ((sp) && (sp->length == 0)) {
6759 /* Let cleanup code purge it */
6760 if (sp->msg_is_complete) {
6761 asoc->stream_queue_cnt--;
6763 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6764 asoc->locked_on_sending = NULL;
6765 asoc->stream_queue_cnt--;
6769 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
6770 (asoc->stream_queue_cnt == 0)) {
6771 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6772 /* Need to abort here */
6778 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6779 0, M_DONTWAIT, 1, MT_DATA);
6781 struct sctp_paramhdr *ph;
6784 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6786 ph = mtod(oper, struct sctp_paramhdr *);
6787 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6788 ph->param_length = htons(SCTP_BUF_LEN(oper));
6789 ippp = (uint32_t *) (ph + 1);
6790 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
6792 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
6793 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
/* All data delivered and streams drained: send SHUTDOWN. */
6795 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
6796 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
6797 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6799 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6800 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6801 sctp_stop_timers_for_shutdown(stcb);
6802 sctp_send_shutdown(stcb,
6803 stcb->asoc.primary_destination);
6804 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
6805 stcb->sctp_ep, stcb, asoc->primary_destination);
6806 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
6807 stcb->sctp_ep, stcb, asoc->primary_destination);
/* Peer initiated shutdown and we are drained: answer SHUTDOWN-ACK. */
6809 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
6810 (asoc->stream_queue_cnt == 0)) {
6811 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6814 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6815 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
6816 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6817 sctp_send_shutdown_ack(stcb,
6818 stcb->asoc.primary_destination);
6820 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
6821 stcb->sctp_ep, stcb, asoc->primary_destination);
6824 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
6825 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
6827 stcb->asoc.peers_rwnd,
6828 stcb->asoc.total_flight,
6829 stcb->asoc.total_output_queue_size);
6833 /* EY! nr_sack version of sctp_handle_segments, nr-gapped TSNs get removed from RtxQ in this method*/
6835 sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
6836 struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
6837 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
6838 uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
6840 /************************************************/
6841 /* process fragments and update sendqueue */
6842 /************************************************/
6843 struct sctp_nr_sack *nr_sack;
6844 struct sctp_gap_ack_block *frag, block;
6845 struct sctp_nr_gap_ack_block *nr_frag, nr_block;
6846 struct sctp_tmit_chunk *tp1;
6852 uint16_t frag_strt, frag_end, primary_flag_set;
6853 uint16_t nr_frag_strt, nr_frag_end;
6855 uint32_t last_frag_high;
6856 uint32_t last_nr_frag_high;
6859 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
6861 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
6862 primary_flag_set = 1;
6864 primary_flag_set = 0;
6866 nr_sack = &ch->nr_sack;
6869 * EY! - I will process nr_gaps similarly,by going to this position
6870 * again if All bit is set
6872 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6873 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6874 *offset += sizeof(block);
6880 for (i = 0; i < num_seg; i++) {
6881 frag_strt = ntohs(frag->start);
6882 frag_end = ntohs(frag->end);
6883 /* some sanity checks on the fargment offsets */
6884 if (frag_strt > frag_end) {
6885 /* this one is malformed, skip */
6889 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
6891 *biggest_tsn_acked = frag_end + last_tsn;
6893 /* mark acked dgs and find out the highestTSN being acked */
6895 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6897 /* save the locations of the last frags */
6898 last_frag_high = frag_end + last_tsn;
6901 * now lets see if we need to reset the queue due to
6902 * a out-of-order SACK fragment
6904 if (compare_with_wrap(frag_strt + last_tsn,
6905 last_frag_high, MAX_TSN)) {
6907 * if the new frag starts after the last TSN
6908 * frag covered, we are ok and this one is
6909 * beyond the last one
6914 * ok, they have reset us, so we need to
6915 * reset the queue this will cause extra
6916 * hunting but hey, they chose the
6917 * performance hit when they failed to order
6920 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6922 last_frag_high = frag_end + last_tsn;
6924 for (j = frag_strt; j <= frag_end; j++) {
6925 theTSN = j + last_tsn;
6927 if (tp1->rec.data.doing_fast_retransmit)
6931 * CMT: CUCv2 algorithm. For each TSN being
6932 * processed from the sent queue, track the
6933 * next expected pseudo-cumack, or
6934 * rtx_pseudo_cumack, if required. Separate
6935 * cumack trackers for first transmissions,
6936 * and retransmissions.
6938 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6939 (tp1->snd_count == 1)) {
6940 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
6941 tp1->whoTo->find_pseudo_cumack = 0;
6943 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6944 (tp1->snd_count > 1)) {
6945 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
6946 tp1->whoTo->find_rtx_pseudo_cumack = 0;
6948 if (tp1->rec.data.TSN_seq == theTSN) {
6949 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
6951 * must be held until
6955 * ECN Nonce: Add the nonce
6956 * value to the sender's
6959 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6961 * If it is less than RESEND, it is
6962 * now no-longer in flight.
6963 * Higher values may already be set
6964 * via previous Gap Ack Blocks...
6965 * i.e. ACKED or RESEND.
6967 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6968 *biggest_newly_acked_tsn, MAX_TSN)) {
6969 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
6978 * this_sack_highest_
6982 if (tp1->rec.data.chunk_was_revoked == 0)
6983 tp1->whoTo->saw_newack = 1;
6985 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6986 tp1->whoTo->this_sack_highest_newack,
6988 tp1->whoTo->this_sack_highest_newack =
6989 tp1->rec.data.TSN_seq;
6994 * this_sack_lowest_n
6997 if (*this_sack_lowest_newack == 0) {
6998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6999 sctp_log_sack(*this_sack_lowest_newack,
7001 tp1->rec.data.TSN_seq,
7004 SCTP_LOG_TSN_ACKED);
7006 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7011 * (rtx-)pseudo-cumac
7016 * (rtx-)pseudo-cumac
7018 * new_(rtx_)pseudo_c
7026 * (rtx-)pseudo-cumac
7034 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
7035 if (tp1->rec.data.chunk_was_revoked == 0) {
7036 tp1->whoTo->new_pseudo_cumack = 1;
7038 tp1->whoTo->find_pseudo_cumack = 1;
7040 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7041 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7043 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
7044 if (tp1->rec.data.chunk_was_revoked == 0) {
7045 tp1->whoTo->new_pseudo_cumack = 1;
7047 tp1->whoTo->find_rtx_pseudo_cumack = 1;
7049 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7050 sctp_log_sack(*biggest_newly_acked_tsn,
7052 tp1->rec.data.TSN_seq,
7055 SCTP_LOG_TSN_ACKED);
7057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7058 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
7059 tp1->whoTo->flight_size,
7061 (uintptr_t) tp1->whoTo,
7062 tp1->rec.data.TSN_seq);
7064 sctp_flight_size_decrease(tp1);
7065 sctp_total_flight_decrease(stcb, tp1);
7067 tp1->whoTo->net_ack += tp1->send_size;
7068 if (tp1->snd_count < 2) {
7075 tp1->whoTo->net_ack2 += tp1->send_size;
7083 sctp_calculate_rto(stcb,
7086 &tp1->sent_rcv_time,
7087 sctp_align_safe_nocopy);
7092 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
7093 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
7094 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
7095 if (compare_with_wrap(tp1->rec.data.TSN_seq,
7096 asoc->this_sack_highest_gap,
7098 asoc->this_sack_highest_gap =
7099 tp1->rec.data.TSN_seq;
7101 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7102 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7103 #ifdef SCTP_AUDITING_ENABLED
7104 sctp_audit_log(0xB2,
7105 (asoc->sent_queue_retran_cnt & 0x000000ff));
7110 * All chunks NOT UNSENT
7111 * fall through here and are
7114 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7115 tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7116 if (tp1->rec.data.chunk_was_revoked) {
7117 /* deflate the cwnd */
7118 tp1->whoTo->cwnd -= tp1->book_size;
7119 tp1->rec.data.chunk_was_revoked = 0;
7123 } /* if (tp1->TSN_seq == theTSN) */
7124 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
7128 tp1 = TAILQ_NEXT(tp1, sctp_next);
7129 } /* end while (tp1) */
7130 } /* end for (j = fragStart */
7131 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
7132 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
7133 *offset += sizeof(block);
7139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
7141 sctp_log_fr(*biggest_tsn_acked,
7142 *biggest_newly_acked_tsn,
7143 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
7145 nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7146 sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7147 *offset += sizeof(nr_block);
7151 if (nr_frag == NULL) {
7155 last_nr_frag_high = 0;
7157 for (i = 0; i < num_nr_seg; i++) {
7159 nr_frag_strt = ntohs(nr_frag->start);
7160 nr_frag_end = ntohs(nr_frag->end);
7162 /* some sanity checks on the nr fragment offsets */
7163 if (nr_frag_strt > nr_frag_end) {
7164 /* this one is malformed, skip */
7168 /* mark acked dgs and find out the highestTSN being acked */
7170 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7172 /* save the locations of the last frags */
7173 last_nr_frag_high = nr_frag_end + last_tsn;
7176 * now lets see if we need to reset the queue due to
7177 * a out-of-order SACK fragment
7179 if (compare_with_wrap(nr_frag_strt + last_tsn,
7180 last_nr_frag_high, MAX_TSN)) {
7182 * if the new frag starts after the last TSN
7183 * frag covered, we are ok and this one is
7184 * beyond the last one
7189 * ok, they have reset us, so we need to
7190 * reset the queue this will cause extra
7191 * hunting but hey, they chose the
7192 * performance hit when they failed to order
7195 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7197 last_nr_frag_high = nr_frag_end + last_tsn;
7200 for (j = nr_frag_strt + last_tsn; (compare_with_wrap((nr_frag_end + last_tsn), j, MAX_TSN)); j++) {
7202 if (tp1->rec.data.TSN_seq == j) {
7203 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7204 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7205 tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7207 * TAILQ_REMOVE(&asoc->sent_q
7208 * ueue, tp1, sctp_next);
7215 sctp_free_bufspace(stcb, asoc, tp1, 1);
7216 sctp_m_freem(tp1->data);
7219 /* asoc->sent_queue_cnt--; */
7221 * sctp_free_a_chunk(stcb,
7227 } /* if (tp1->TSN_seq == j) */
7228 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
7231 tp1 = TAILQ_NEXT(tp1, sctp_next);
7232 } /* end while (tp1) */
7234 } /* end for (j = nrFragStart */
7236 nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7237 sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7238 *offset += sizeof(nr_block);
7239 if (nr_frag == NULL) {
7245 * EY- wake up the socket if things have been removed from the sent
7248 if ((wake_him) && (stcb->sctp_socket)) {
7249 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7253 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7255 * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
7256 * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
7257 * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
7259 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7260 so = SCTP_INP_SO(stcb->sctp_ep);
7261 atomic_add_int(&stcb->asoc.refcnt, 1);
7262 SCTP_TCB_UNLOCK(stcb);
7263 SCTP_SOCKET_LOCK(so, 1);
7264 SCTP_TCB_LOCK(stcb);
7265 atomic_subtract_int(&stcb->asoc.refcnt, 1);
7266 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7267 /* assoc was freed while we were unlocked */
7268 SCTP_SOCKET_UNLOCK(so, 1);
7272 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7273 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7274 SCTP_SOCKET_UNLOCK(so, 1);
7277 * (SCTP_BASE_SYSCTL(sctp_logging_level) &
7278 * SCTP_WAKE_LOGGING_ENABLE) {
7279 * sctp_wakeup_log(stcb, cum_ack, wake_him,
7280 * SCTP_NOWAKE_FROM_SACK); } } */
7284 /* Identifies the non-renegable TSNs that are revoked. */
/*
 * Walks asoc->sent_queue for chunks above the cumulative ack point and
 * re-marks non-renegable (NR) gap-acked chunks. A chunk already in
 * SCTP_DATAGRAM_NR_ACKED that was NOT re-acked by this NR-SACK has been
 * revoked by the peer — which is illegal for non-renegable TSNs (the "EY
 * TODO" below notes the association abort is not yet implemented).
 *
 * NOTE(review): this excerpt elides physical lines (the return-type line,
 * braces, and the while-loop header are not visible here); comments below
 * describe only what the visible statements establish.
 */
7286 sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
7287 struct sctp_association *asoc, uint32_t cumack,
7288 u_long biggest_tsn_acked)
7290 struct sctp_tmit_chunk *tp1;
/* Start at the head of the sent queue (lowest outstanding TSN). */
7292 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only chunks strictly above the cum-ack can have been gap-acked/revoked. */
7294 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
7297 * ok this guy is either ACK or MARKED. If it is
7298 * ACKED it has been previously acked but not this
7299 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Chunks beyond the highest TSN acked by this SACK are out of scope. */
7302 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
7307 if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
7309 * EY! a non-renegable TSN is revoked, need
7310 * to abort the association
7313 * EY TODO: put in the code to abort the
7317 } else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
7318 /* it has been re-acked in this SACK */
7319 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* UNSENT chunks mark the end of transmitted data on the queue. */
7322 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
7324 tp1 = TAILQ_NEXT(tp1, sctp_next);
7328 /* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method*/
7330 sctp_handle_nr_sack(struct mbuf *m, int offset,
7331 struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
7332 struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
7334 struct sctp_association *asoc;
7337 struct sctp_nr_sack *nr_sack;
7338 struct sctp_tmit_chunk *tp1, *tp2;
7339 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
7340 this_sack_lowest_newack;
7341 uint32_t sav_cum_ack;
7344 uint16_t num_seg, num_nr_seg, num_dup;
7345 uint16_t wake_him = 0;
7346 unsigned int nr_sack_length;
7347 uint32_t send_s = 0;
7349 int accum_moved = 0;
7350 int will_exit_fast_recovery = 0;
7351 uint32_t a_rwnd, old_rwnd;
7352 int win_probe_recovery = 0;
7353 int win_probe_recovered = 0;
7354 struct sctp_nets *net = NULL;
7355 int nonce_sum_flag, ecn_seg_sums = 0;
7357 uint8_t reneged_all = 0;
7358 uint8_t cmt_dac_flag;
7361 * we take any chance we can to service our queues since we cannot
7362 * get awoken when the socket is read from :<
7365 * Now perform the actual SACK handling: 1) Verify that it is not an
7366 * old sack, if so discard. 2) If there is nothing left in the send
7367 * queue (cum-ack is equal to last acked) then you have a duplicate
7368 * too, update any rwnd change and verify no timers are running.
7369 * then return. 3) Process any new consecutive data i.e. cum-ack
7370 * moved process these first and note that it moved. 4) Process any
7371 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
7372 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
7373 * sync up flightsizes and things, stop all timers and also check
7374 * for shutdown_pending state. If so then go ahead and send off the
7375 * shutdown. If in shutdown recv, send off the shutdown-ack and
7376 * start that timer, Ret. 9) Strike any non-acked things and do FR
7377 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
7378 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
7379 * if in shutdown_recv state.
7381 SCTP_TCB_LOCK_ASSERT(stcb);
7382 nr_sack = &ch->nr_sack;
7384 this_sack_lowest_newack = 0;
7386 nr_sack_length = (unsigned int)nr_sack_len;
7388 SCTP_STAT_INCR(sctps_slowpath_sack);
7389 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
7390 cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
7391 #ifdef SCTP_ASOCLOG_OF_TSNS
7392 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
7393 stcb->asoc.cumack_log_at++;
7394 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
7395 stcb->asoc.cumack_log_at = 0;
7398 num_seg = ntohs(nr_sack->num_gap_ack_blks);
7399 num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
7402 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
7403 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
7404 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
7407 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
7408 num_dup = ntohs(nr_sack->num_dup_tsns);
7410 old_rwnd = stcb->asoc.peers_rwnd;
7411 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
7412 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
7413 stcb->asoc.overall_error_count,
7415 SCTP_FROM_SCTP_INDATA,
7418 stcb->asoc.overall_error_count = 0;
7420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7421 sctp_log_sack(asoc->last_acked_seq,
7428 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
7429 int off_to_dup, iii;
7430 uint32_t *dupdata, dblock;
7432 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
7433 (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
7434 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
7435 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7436 sizeof(uint32_t), (uint8_t *) & dblock);
7437 off_to_dup += sizeof(uint32_t);
7439 for (iii = 0; iii < num_dup; iii++) {
7440 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
7441 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7442 sizeof(uint32_t), (uint8_t *) & dblock);
7443 if (dupdata == NULL)
7445 off_to_dup += sizeof(uint32_t);
7449 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
7450 off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
7453 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7455 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
7456 tp1 = TAILQ_LAST(&asoc->sent_queue,
7457 sctpchunk_listhead);
7458 send_s = tp1->rec.data.TSN_seq + 1;
7460 send_s = asoc->sending_seq;
7462 if (cum_ack == send_s ||
7463 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
7470 panic("Impossible sack 1");
7475 * no way, we have not even sent this TSN out yet.
7476 * Peer is hopelessly messed up with us.
7481 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7482 0, M_DONTWAIT, 1, MT_DATA);
7484 struct sctp_paramhdr *ph;
7487 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7489 ph = mtod(oper, struct sctp_paramhdr *);
7490 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
7491 ph->param_length = htons(SCTP_BUF_LEN(oper));
7492 ippp = (uint32_t *) (ph + 1);
7493 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
7495 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
7496 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
7501 /**********************/
7502 /* 1) check the range */
7503 /**********************/
7504 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
7505 /* acking something behind */
7508 sav_cum_ack = asoc->last_acked_seq;
7510 /* update the Rwnd of the peer */
7511 if (TAILQ_EMPTY(&asoc->sent_queue) &&
7512 TAILQ_EMPTY(&asoc->send_queue) &&
7513 (asoc->stream_queue_cnt == 0)
7515 /* nothing left on send/sent and strmq */
7516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7517 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7518 asoc->peers_rwnd, 0, 0, a_rwnd);
7520 asoc->peers_rwnd = a_rwnd;
7521 if (asoc->sent_queue_retran_cnt) {
7522 asoc->sent_queue_retran_cnt = 0;
7524 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7525 /* SWS sender side engages */
7526 asoc->peers_rwnd = 0;
7528 /* stop any timers */
7529 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7530 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7531 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7532 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7533 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7534 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
7535 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7536 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7539 net->partial_bytes_acked = 0;
7540 net->flight_size = 0;
7542 asoc->total_flight = 0;
7543 asoc->total_flight_count = 0;
7547 * We init netAckSz and netAckSz2 to 0. These are used to track 2
7548 * things. The total byte count acked is tracked in netAckSz AND
7549 * netAck2 is used to track the total bytes acked that are un-
7550 * amibguious and were never retransmitted. We track these on a per
7551 * destination address basis.
7553 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7554 net->prev_cwnd = net->cwnd;
7559 * CMT: Reset CUC and Fast recovery algo variables before
7562 net->new_pseudo_cumack = 0;
7563 net->will_exit_fast_recovery = 0;
7565 /* process the new consecutive TSN first */
7566 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7568 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
7570 last_tsn == tp1->rec.data.TSN_seq) {
7571 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7573 * ECN Nonce: Add the nonce to the sender's
7576 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
7578 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
7580 * If it is less than ACKED, it is
7581 * now no-longer in flight. Higher
7582 * values may occur during marking
7584 if ((tp1->whoTo->dest_state &
7585 SCTP_ADDR_UNCONFIRMED) &&
7586 (tp1->snd_count < 2)) {
7588 * If there was no retran
7589 * and the address is
7590 * un-confirmed and we sent
7592 * sacked.. its confirmed,
7595 tp1->whoTo->dest_state &=
7596 ~SCTP_ADDR_UNCONFIRMED;
7598 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
7599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7600 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
7601 tp1->whoTo->flight_size,
7603 (uintptr_t) tp1->whoTo,
7604 tp1->rec.data.TSN_seq);
7606 sctp_flight_size_decrease(tp1);
7607 sctp_total_flight_decrease(stcb, tp1);
7609 tp1->whoTo->net_ack += tp1->send_size;
7611 /* CMT SFR and DAC algos */
7612 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7613 tp1->whoTo->saw_newack = 1;
7615 if (tp1->snd_count < 2) {
7617 * True non-retransmited
7620 tp1->whoTo->net_ack2 +=
7623 /* update RTO too? */
7626 sctp_calculate_rto(stcb,
7628 &tp1->sent_rcv_time,
7629 sctp_align_safe_nocopy);
7634 * CMT: CUCv2 algorithm. From the
7635 * cumack'd TSNs, for each TSN being
7636 * acked for the first time, set the
7637 * following variables for the
7638 * corresp destination.
7639 * new_pseudo_cumack will trigger a
7641 * find_(rtx_)pseudo_cumack will
7642 * trigger search for the next
7643 * expected (rtx-)pseudo-cumack.
7645 tp1->whoTo->new_pseudo_cumack = 1;
7646 tp1->whoTo->find_pseudo_cumack = 1;
7647 tp1->whoTo->find_rtx_pseudo_cumack = 1;
7650 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7651 sctp_log_sack(asoc->last_acked_seq,
7653 tp1->rec.data.TSN_seq,
7656 SCTP_LOG_TSN_ACKED);
7658 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7659 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7662 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7663 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7664 #ifdef SCTP_AUDITING_ENABLED
7665 sctp_audit_log(0xB3,
7666 (asoc->sent_queue_retran_cnt & 0x000000ff));
7669 if (tp1->rec.data.chunk_was_revoked) {
7670 /* deflate the cwnd */
7671 tp1->whoTo->cwnd -= tp1->book_size;
7672 tp1->rec.data.chunk_was_revoked = 0;
7674 tp1->sent = SCTP_DATAGRAM_ACKED;
7679 tp1 = TAILQ_NEXT(tp1, sctp_next);
7681 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
7682 /* always set this up to cum-ack */
7683 asoc->this_sack_highest_gap = last_tsn;
7685 /* Move offset up to point to gaps/dups */
7686 offset += sizeof(struct sctp_nr_sack_chunk);
7687 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
7689 /* skip corrupt segments */
7695 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
7696 * to be greater than the cumack. Also reset saw_newack to 0
7699 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7700 net->saw_newack = 0;
7701 net->this_sack_highest_newack = last_tsn;
7705 * thisSackHighestGap will increase while handling NEW
7706 * segments this_sack_highest_newack will increase while
7707 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
7708 * used for CMT DAC algo. saw_newack will also change.
7711 sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
7712 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
7713 num_seg, num_nr_seg, &ecn_seg_sums);
7716 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7718 * validate the biggest_tsn_acked in the gap acks if
7719 * strict adherence is wanted.
7721 if ((biggest_tsn_acked == send_s) ||
7722 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
7724 * peer is either confused or we are under
7725 * attack. We must abort.
7732 /*******************************************/
7733 /* cancel ALL T3-send timer if accum moved */
7734 /*******************************************/
7735 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7736 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7737 if (net->new_pseudo_cumack)
7738 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7740 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
7745 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7746 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7747 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
7751 /********************************************/
7752 /* drop the acked chunks from the sendqueue */
7753 /********************************************/
7754 asoc->last_acked_seq = cum_ack;
7756 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7760 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
7764 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
7765 /* no more sent on list */
7766 printf("Warning, tp1->sent == %d and its now acked?\n",
7769 tp2 = TAILQ_NEXT(tp1, sctp_next);
7770 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
7771 if (tp1->pr_sctp_on) {
7772 if (asoc->pr_sctp_cnt != 0)
7773 asoc->pr_sctp_cnt--;
7775 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
7776 (asoc->total_flight > 0)) {
7778 panic("Warning flight size is postive and should be 0");
7780 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
7781 asoc->total_flight);
7783 asoc->total_flight = 0;
7786 /* sa_ignore NO_NULL_CHK */
7787 sctp_free_bufspace(stcb, asoc, tp1, 1);
7788 sctp_m_freem(tp1->data);
7789 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
7790 asoc->sent_queue_cnt_removeable--;
7793 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7794 sctp_log_sack(asoc->last_acked_seq,
7796 tp1->rec.data.TSN_seq,
7799 SCTP_LOG_FREE_SENT);
7802 asoc->sent_queue_cnt--;
7803 sctp_free_a_chunk(stcb, tp1);
7806 } while (tp1 != NULL);
7809 /* sa_ignore NO_NULL_CHK */
7810 if ((wake_him) && (stcb->sctp_socket)) {
7811 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7815 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7816 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7817 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7819 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7820 so = SCTP_INP_SO(stcb->sctp_ep);
7821 atomic_add_int(&stcb->asoc.refcnt, 1);
7822 SCTP_TCB_UNLOCK(stcb);
7823 SCTP_SOCKET_LOCK(so, 1);
7824 SCTP_TCB_LOCK(stcb);
7825 atomic_subtract_int(&stcb->asoc.refcnt, 1);
7826 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7827 /* assoc was freed while we were unlocked */
7828 SCTP_SOCKET_UNLOCK(so, 1);
7832 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7833 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7834 SCTP_SOCKET_UNLOCK(so, 1);
7837 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7838 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
7842 if (asoc->fast_retran_loss_recovery && accum_moved) {
7843 if (compare_with_wrap(asoc->last_acked_seq,
7844 asoc->fast_recovery_tsn, MAX_TSN) ||
7845 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
7846 /* Setup so we will exit RFC2582 fast recovery */
7847 will_exit_fast_recovery = 1;
7851 * Check for revoked fragments:
7853 * if Previous sack - Had no frags then we can't have any revoked if
7854 * Previous sack - Had frag's then - If we now have frags aka
7855 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
7856 * some of them. else - The peer revoked all ACKED fragments, since
7857 * we had some before and now we have NONE.
7861 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7863 else if (asoc->saw_sack_with_frags) {
7864 int cnt_revoked = 0;
7866 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7868 /* Peer revoked all dg's marked or acked */
7869 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7871 * EY- maybe check only if it is nr_acked
7872 * nr_marked may not be possible
7874 if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
7875 (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
7877 * EY! - TODO: Something previously
7878 * nr_gapped is reneged, abort the
7883 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
7884 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
7885 tp1->sent = SCTP_DATAGRAM_SENT;
7886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7887 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
7888 tp1->whoTo->flight_size,
7890 (uintptr_t) tp1->whoTo,
7891 tp1->rec.data.TSN_seq);
7893 sctp_flight_size_increase(tp1);
7894 sctp_total_flight_increase(stcb, tp1);
7895 tp1->rec.data.chunk_was_revoked = 1;
7897 * To ensure that this increase in
7898 * flightsize, which is artificial,
7899 * does not throttle the sender, we
7900 * also increase the cwnd
7903 tp1->whoTo->cwnd += tp1->book_size;
7911 asoc->saw_sack_with_frags = 0;
7914 asoc->saw_sack_with_frags = 1;
7916 asoc->saw_sack_with_frags = 0;
7918 /* EY! - not sure about if there should be an IF */
7920 sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7921 else if (asoc->saw_sack_with_nr_frags) {
7923 * EY!- TODO: all previously nr_gapped chunks have been
7924 * reneged abort the association
7926 asoc->saw_sack_with_nr_frags = 0;
7929 asoc->saw_sack_with_nr_frags = 1;
7931 asoc->saw_sack_with_nr_frags = 0;
7932 /* JRS - Use the congestion control given in the CC module */
7933 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
7935 if (TAILQ_EMPTY(&asoc->sent_queue)) {
7936 /* nothing left in-flight */
7937 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7938 /* stop all timers */
7939 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7940 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7941 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
7942 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7943 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
7946 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7947 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
7948 net->flight_size = 0;
7949 net->partial_bytes_acked = 0;
7951 asoc->total_flight = 0;
7952 asoc->total_flight_count = 0;
7954 /**********************************/
7955 /* Now what about shutdown issues */
7956 /**********************************/
7957 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
7958 /* nothing left on sendqueue.. consider done */
7959 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7960 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7961 asoc->peers_rwnd, 0, 0, a_rwnd);
7963 asoc->peers_rwnd = a_rwnd;
7964 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7965 /* SWS sender side engages */
7966 asoc->peers_rwnd = 0;
7969 if ((asoc->stream_queue_cnt == 1) &&
7970 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7971 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
7972 (asoc->locked_on_sending)
7974 struct sctp_stream_queue_pending *sp;
7977 * I may be in a state where we got all across.. but
7978 * cannot write more due to a shutdown... we abort
7979 * since the user did not indicate EOR in this case.
7981 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
7983 if ((sp) && (sp->length == 0)) {
7984 asoc->locked_on_sending = NULL;
7985 if (sp->msg_is_complete) {
7986 asoc->stream_queue_cnt--;
7988 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7989 asoc->stream_queue_cnt--;
7993 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
7994 (asoc->stream_queue_cnt == 0)) {
7995 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
7996 /* Need to abort here */
8002 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
8003 0, M_DONTWAIT, 1, MT_DATA);
8005 struct sctp_paramhdr *ph;
8008 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
8010 ph = mtod(oper, struct sctp_paramhdr *);
8011 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
8012 ph->param_length = htons(SCTP_BUF_LEN(oper));
8013 ippp = (uint32_t *) (ph + 1);
8014 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
8016 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
8017 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
8020 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
8021 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
8022 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8024 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
8025 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8026 sctp_stop_timers_for_shutdown(stcb);
8027 sctp_send_shutdown(stcb,
8028 stcb->asoc.primary_destination);
8029 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
8030 stcb->sctp_ep, stcb, asoc->primary_destination);
8031 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
8032 stcb->sctp_ep, stcb, asoc->primary_destination);
8035 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
8036 (asoc->stream_queue_cnt == 0)) {
8037 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
8040 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8041 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
8042 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8043 sctp_send_shutdown_ack(stcb,
8044 stcb->asoc.primary_destination);
8046 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
8047 stcb->sctp_ep, stcb, asoc->primary_destination);
8052 * Now here we are going to recycle net_ack for a different use...
8055 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8060 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
8061 * to be done. Setting this_sack_lowest_newack to the cum_ack will
8062 * automatically ensure that.
8064 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
8065 this_sack_lowest_newack = cum_ack;
8068 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
8069 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
8071 /* JRS - Use the congestion control given in the CC module */
8072 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
8074 /******************************************************************
8075 * Here we do the stuff with ECN Nonce checking.
8076 * We basically check to see if the nonce sum flag was incorrect
8077 * or if resynchronization needs to be done. Also if we catch a
8078 * misbehaving receiver we give him the kick.
8079 ******************************************************************/
8081 if (asoc->ecn_nonce_allowed) {
8082 if (asoc->nonce_sum_check) {
8083 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
8084 if (asoc->nonce_wait_for_ecne == 0) {
8085 struct sctp_tmit_chunk *lchk;
8087 lchk = TAILQ_FIRST(&asoc->send_queue);
8088 asoc->nonce_wait_for_ecne = 1;
/*
 * NOTE(review): this span is an ELIDED listing fragment from the tail of the
 * SACK-processing path of sctp_indata.c (the enclosing function -- apparently
 * sctp_handle_sack -- begins before and ends after this view, and the
 * discontinuous numeric prefixes show that many original lines, including
 * closing braces and comment delimiters, are missing).  Only annotations are
 * added here; every surviving line is kept byte-for-byte.  The numeric prefix
 * on each line is the original file's line number, not part of the C code.
 */
/*
 * ECN nonce bookkeeping: record the TSN we must see cum-acked before the
 * nonce sum may be re-checked.  It is taken from the first chunk still in
 * flight (lchk) or, failing that, from the next sequence number to be sent.
 */
8090 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
8092 asoc->nonce_wait_tsn = asoc->sending_seq;
/*
 * If the cumulative ack has already reached/passed nonce_wait_tsn while the
 * sums still disagree, the peer is misbehaving: ECN and the ECN nonce are
 * disabled for this association.
 */
8095 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
8096 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
8098 * Misbehaving peer. We need
8099 * to react to this guy
8101 asoc->ecn_allowed = 0;
8102 asoc->ecn_nonce_allowed = 0;
/*
 * Otherwise: once the cum-ack moves past nonce_resync_tsn we may re-enable
 * the nonce-sum check and re-derive the expected base from this SACK.
 */
8107 /* See if Resynchronization Possible */
8108 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
8109 asoc->nonce_sum_check = 1;
8111 * now we must calculate what the base is.
8112 * We do this based on two things, we know
8113 * the total's for all the segments
8114 * gap-acked in the SACK, its stored in
8115 * ecn_seg_sums. We also know the SACK's
8116 * nonce sum, its in nonce_sum_flag. So we
8117 * can build a truth table to back-calculate
8119 * asoc->nonce_sum_expect_base:
8121 * SACK-flag-Value Seg-Sums Base 0 0 0
/* XOR of gap-ack segment sums and the SACK's nonce flag yields the base. */
8124 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/*
 * Fast-recovery / satellite-T3 recovery exit: clear the association-level
 * flags once the recovery TSN has been cum-acked, then clear the per-
 * destination (net) fast-recovery flags as well.
 */
8128 /* Now are we exiting loss recovery ? */
8129 if (will_exit_fast_recovery) {
8130 /* Ok, we must exit fast recovery */
8131 asoc->fast_retran_loss_recovery = 0;
8133 if ((asoc->sat_t3_loss_recovery) &&
8134 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
8136 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
8137 /* end satellite t3 loss recovery */
8138 asoc->sat_t3_loss_recovery = 0;
8143 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8144 if (net->will_exit_fast_recovery) {
8145 /* Ok, we must exit fast recovery */
8146 net->fast_retran_loss_recovery = 0;
/*
 * Recompute the peer's receive window from the advertised a_rwnd minus
 * everything still in flight (plus per-chunk overhead).  A result below the
 * sender-side SWS threshold is clamped to 0 (silly-window avoidance); a
 * window that grew past old_rwnd arms window-probe recovery below.
 */
8150 /* Adjust and set the new rwnd value */
8151 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
8152 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
8153 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
8155 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
8156 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
8157 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8158 /* SWS sender side engages */
8159 asoc->peers_rwnd = 0;
8161 if (asoc->peers_rwnd > old_rwnd) {
8162 win_probe_recovery = 1;
/*
 * Per-destination timer maintenance.  If the window reopened and this net
 * had an outstanding window probe, clear the probe marking on the first
 * probed chunk (sctp_window_probe_recovery) so it is treated as normal
 * queued data again.  Then: data in flight => ensure a T3-rxt SEND timer is
 * running; otherwise stop any pending one (SCTP_LOC_22 tags the stop site),
 * and also stop a pending early-FR timer when that feature is enabled.
 * NOTE(review): the duplicated window_probe-clearing branches at 8191-8198
 * presumably sit in different if/else arms whose braces are elided here --
 * confirm against the full source before drawing conclusions.
 */
8165 * Now we must setup so we have a timer up for anyone with
8171 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8172 if (win_probe_recovery && (net->window_probe)) {
8173 win_probe_recovered = 1;
8175 * Find first chunk that was used with
8176 * window probe and clear the event. Put
8177 * it back into the send queue as if has
8180 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8181 if (tp1->window_probe) {
8182 sctp_window_probe_recovery(stcb, asoc, net, tp1);
8187 if (net->flight_size) {
8189 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8190 stcb->sctp_ep, stcb, net);
8191 if (net->window_probe) {
8192 net->window_probe = 0;
8195 if (net->window_probe) {
8196 net->window_probe = 0;
8197 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8198 stcb->sctp_ep, stcb, net);
8199 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8200 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8202 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
8204 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8205 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8206 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
8207 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
8208 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * Flight-size sanity audit: with data still on the sent queue but no
 * retransmissions pending and no probe just recovered, an inconsistency
 * reported by sctp_fs_audit() is repaired by zeroing all flight counters
 * and rebuilding them from the sent queue (chunks not yet marked-for-resend
 * re-enter flight; RESEND-marked chunks recount the retran counter).
 * NOTE(review): the leading conjuncts of this if-condition (before 8214)
 * are elided from this view.
 */
8214 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
8215 (asoc->sent_queue_retran_cnt == 0) &&
8216 (win_probe_recovered == 0) &&
8219 * huh, this should not happen unless all packets are
8220 * PR-SCTP and marked to skip of course.
8222 if (sctp_fs_audit(asoc)) {
8223 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8224 net->flight_size = 0;
8226 asoc->total_flight = 0;
8227 asoc->total_flight_count = 0;
8228 asoc->sent_queue_retran_cnt = 0;
8229 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8230 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
8231 sctp_flight_size_increase(tp1);
8232 sctp_total_flight_increase(stcb, tp1);
8233 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
8234 asoc->sent_queue_retran_cnt++;
/*
 * PR-SCTP (RFC 3758) steps C1-C3: pull advancedPeerAckPoint up to the new
 * cum-ack, try to advance it further over abandoned chunks, and emit a
 * FORWARD-TSN when it moved past cum_ack.  Sending a FWD-TSN also disables
 * the ECN nonce-sum check and records the resync TSN (see 8107 above).
 * A FWD-TSN that appears lost (fwd_tsn_cnt > 3) is re-sent.  Finally a
 * SEND timer is assured on the destination of the first still-outstanding
 * chunk (lchk), when one exists.
 */
8241 /*********************************************/
8242 /* Here we perform PR-SCTP procedures */
8244 /*********************************************/
8245 /* C1. update advancedPeerAckPoint */
8246 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
8247 asoc->advanced_peer_ack_point = cum_ack;
8249 /* C2. try to further move advancedPeerAckPoint ahead */
8250 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
8251 struct sctp_tmit_chunk *lchk;
8252 uint32_t old_adv_peer_ack_point;
8254 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
8255 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
8256 /* C3. See if we need to send a Fwd-TSN */
8257 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
8260 * ISSUE with ECN, see FWD-TSN processing for notes
8261 * on issues that will occur when the ECN NONCE
8262 * stuff is put into SCTP for cross checking.
8264 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
8266 send_forward_tsn(stcb, asoc);
8268 * ECN Nonce: Disable Nonce Sum check when
8269 * FWD TSN is sent and store resync tsn
8271 asoc->nonce_sum_check = 0;
8272 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
8274 /* try to FR fwd-tsn's that get lost too */
8275 lchk->rec.data.fwd_tsn_cnt++;
8276 if (lchk->rec.data.fwd_tsn_cnt > 3) {
8277 send_forward_tsn(stcb, asoc);
8278 lchk->rec.data.fwd_tsn_cnt = 0;
8283 /* Assure a timer is up */
8284 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8285 stcb->sctp_ep, stcb, lchk->whoTo);
/* Optional debug logging of the post-SACK rwnd/flight/queue state. */
8288 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
8289 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
8291 stcb->asoc.peers_rwnd,
8292 stcb->asoc.total_flight,
8293 stcb->asoc.total_output_queue_size);