2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * sctp_set_rwnd() - recompute this association's advertised receive
 * window via sctp_calc_rwnd() and cache the result in asoc->my_rwnd.
 * NOTE(review): this listing has elided lines (embedded numbering jumps
 * 60 -> 62); the return type and braces are not visible in this view.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * sctp_calc_rwnd() - derive the receive window to advertise to the peer.
 * If nothing is buffered anywhere (socket buffer, reassembly queue,
 * per-stream queues) the full window is granted; otherwise the actual
 * socket-buffer space is reduced by data still held on the reassembly
 * and stream queues, and by the rwnd control overhead.
 * NOTE(review): lines are elided in this listing (no return statements,
 * 'calc' declaration, or closing braces visible) -- numbering jumps
 * confirm the gaps.
 */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
/* No socket: nothing to compute a window against. */
77 if (stcb->sctp_socket == NULL)
/* Nothing queued at all -> grant the full (floored) window. */
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
/* Subtract data still held on the reassembly and stream queues. */
94 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
95 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
101 /* what is the overhead of all these rwnd's */
102 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
104 * If the window gets too small due to ctrl-stuff, reduce it to 1,
105 * even it is 0. SWS engaged
/* Silly-window-syndrome avoidance kicks in below this threshold. */
107 if (calc < stcb->asoc.my_rwnd_control_len) {
116 * Build out our readq entry based on the incoming packet.
/*
 * sctp_build_readq_entry() - allocate and populate a read-queue entry
 * from per-packet fields (tsn/ppid/stream/seq/flags).  Takes a reference
 * on 'net' (whoFrom).  Returns NULL when allocation fails, otherwise the
 * initialized entry.
 * NOTE(review): 'dm' (the data mbuf assigned below) is a parameter whose
 * declaration line is elided from this listing -- confirm against the
 * full source.  The sinfo_flags shift (<< 8) packs chunk rcv_flags into
 * the upper byte of the sinfo flags word.
 */
118 struct sctp_queued_to_read *
119 sctp_build_readq_entry(struct sctp_tcb *stcb,
120 struct sctp_nets *net,
121 uint32_t tsn, uint32_t ppid,
122 uint32_t context, uint16_t stream_no,
123 uint16_t stream_seq, uint8_t flags,
126 struct sctp_queued_to_read *read_queue_e = NULL;
128 sctp_alloc_a_readq(stcb, read_queue_e);
129 if (read_queue_e == NULL) {
132 read_queue_e->sinfo_stream = stream_no;
133 read_queue_e->sinfo_ssn = stream_seq;
134 read_queue_e->sinfo_flags = (flags << 8);
135 read_queue_e->sinfo_ppid = ppid;
136 read_queue_e->sinfo_context = stcb->asoc.context;
137 read_queue_e->sinfo_timetolive = 0;
138 read_queue_e->sinfo_tsn = tsn;
139 read_queue_e->sinfo_cumtsn = tsn;
140 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
141 read_queue_e->whoFrom = net;
142 read_queue_e->length = 0;
/* Hold a reference on the source net for the life of this entry. */
143 atomic_add_int(&net->ref_count, 1);
144 read_queue_e->data = dm;
145 read_queue_e->spec_flags = 0;
146 read_queue_e->tail_mbuf = NULL;
147 read_queue_e->aux_data = NULL;
148 read_queue_e->stcb = stcb;
149 read_queue_e->port_from = stcb->rport;
150 read_queue_e->do_not_ref_stcb = 0;
151 read_queue_e->end_added = 0;
152 read_queue_e->some_taken = 0;
153 read_queue_e->pdapi_aborted = 0;
155 return (read_queue_e);
160 * Build out our readq entry based on the incoming packet.
/*
 * sctp_build_readq_entry_chk() - variant of sctp_build_readq_entry()
 * that sources every field from an existing transmit-chunk record
 * (chk->rec.data.*) instead of loose parameters.  Takes a reference on
 * chk->whoTo and adopts chk->data as the entry's mbuf chain.  Returns
 * NULL on allocation failure.
 */
162 static struct sctp_queued_to_read *
163 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
164 struct sctp_tmit_chunk *chk)
166 struct sctp_queued_to_read *read_queue_e = NULL;
168 sctp_alloc_a_readq(stcb, read_queue_e);
169 if (read_queue_e == NULL) {
172 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
173 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
174 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
175 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
176 read_queue_e->sinfo_context = stcb->asoc.context;
177 read_queue_e->sinfo_timetolive = 0;
178 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
179 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
180 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
181 read_queue_e->whoFrom = chk->whoTo;
182 read_queue_e->aux_data = NULL;
183 read_queue_e->length = 0;
/* Reference the destination net the chunk arrived through. */
184 atomic_add_int(&chk->whoTo->ref_count, 1);
185 read_queue_e->data = chk->data;
186 read_queue_e->tail_mbuf = NULL;
187 read_queue_e->stcb = stcb;
188 read_queue_e->port_from = stcb->rport;
189 read_queue_e->spec_flags = 0;
190 read_queue_e->do_not_ref_stcb = 0;
191 read_queue_e->end_added = 0;
192 read_queue_e->some_taken = 0;
193 read_queue_e->pdapi_aborted = 0;
195 return (read_queue_e);
/*
 * sctp_build_ctl_nchunk() - build a control-message mbuf carrying the
 * sndrcvinfo (or extended rcvinfo) for delivery to the application.
 * Returns early when the user has not enabled RECVDATAIOEVNT.  The
 * cmsg payload size depends on whether SCTP_PCB_FLAGS_EXT_RCVINFO is on.
 * NOTE(review): return type, 'ret'/'len'/'cmh' declarations, and several
 * branch/return lines are elided from this listing.
 */
200 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
201 struct sctp_sndrcvinfo *sinfo)
203 struct sctp_sndrcvinfo *outinfo;
207 int use_extended = 0;
209 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
210 /* user does not want the sndrcv ctl */
/* Pick the cmsg length to match the struct the user asked for. */
213 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
215 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
217 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
221 ret = sctp_get_mbuf_for_msg(len,
222 0, M_DONTWAIT, 1, MT_DATA);
228 /* We need a CMSG header followed by the struct */
229 cmh = mtod(ret, struct cmsghdr *);
230 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
231 cmh->cmsg_level = IPPROTO_SCTP;
233 cmh->cmsg_type = SCTP_EXTRCV;
235 memcpy(outinfo, sinfo, len);
237 cmh->cmsg_type = SCTP_SNDRCV;
241 SCTP_BUF_LEN(ret) = cmh->cmsg_len;
/*
 * sctp_build_ctl_cchunk() - same control-message construction as
 * sctp_build_ctl_nchunk(), but the cmsg is built in a SCTP_MALLOC'd
 * flat buffer ('buf') rather than an mbuf.
 * NOTE(review): return type, buffer/length declarations, and the
 * trailing length/return lines are elided from this listing; ownership
 * of 'buf' (who frees it) is not visible here -- confirm at callers.
 */
247 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
249 struct sctp_sndrcvinfo *sinfo)
251 struct sctp_sndrcvinfo *outinfo;
255 int use_extended = 0;
257 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
258 /* user does not want the sndrcv ctl */
261 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
263 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
265 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
267 SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
272 /* We need a CMSG header followed by the struct */
273 cmh = (struct cmsghdr *)buf;
274 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
275 cmh->cmsg_level = IPPROTO_SCTP;
277 cmh->cmsg_type = SCTP_EXTRCV;
279 memcpy(outinfo, sinfo, len);
281 cmh->cmsg_type = SCTP_SNDRCV;
291 * We are delivering currently from the reassembly queue. We must continue to
292 * deliver until we either: 1) run out of space. 2) run out of sequential
293 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * sctp_service_reassembly() - drain the reassembly queue to the socket.
 * If the socket is gone, the whole queue is freed.  Otherwise chunks are
 * delivered in TSN order: a FIRST fragment starts a new readq entry
 * (saved as control_pdapi), subsequent fragments are appended to it, and
 * a LAST fragment closes out partial delivery and triggers in-order
 * delivery from the stream queue.  NR-SACK tagging of delivered TSNs is
 * interleaved throughout when the peer supports it.
 * NOTE(review): many lines are elided from this listing (loop heads,
 * 'end' assignments, error-path bodies); statement order here is
 * load-bearing -- do not restructure without the full source.
 */
296 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
298 struct sctp_tmit_chunk *chk;
304 /* EY if any out-of-order delivered, then tag it nr on nr_map */
305 uint32_t nr_tsn, nr_gap;
307 struct sctp_queued_to_read *control, *ctl, *ctlat;
312 cntDel = stream_no = 0;
313 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
314 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
315 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
316 /* socket above is long gone or going.. */
/* Socket unusable: abandon partial delivery and free the queue. */
318 asoc->fragmented_delivery_inprogress = 0;
319 chk = TAILQ_FIRST(&asoc->reasmqueue);
321 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
322 asoc->size_on_reasm_queue -= chk->send_size;
323 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
325 * Lose the data pointer, since its in the socket
329 sctp_m_freem(chk->data);
332 /* Now free the address and data */
333 sctp_free_a_chunk(stcb, chk);
334 /* sa_ignore FREED_MEMORY */
335 chk = TAILQ_FIRST(&asoc->reasmqueue);
339 SCTP_TCB_LOCK_ASSERT(stcb);
341 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Stop when the queue head is not the next sequential TSN. */
345 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
346 /* Can't deliver more :< */
349 stream_no = chk->rec.data.stream_number;
350 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
351 if (nxt_todel != chk->rec.data.stream_seq &&
352 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
354 * Not the next sequence to deliver in its stream OR
359 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* FIRST fragment: start a new readq entry for this message. */
361 control = sctp_build_readq_entry_chk(stcb, chk);
362 if (control == NULL) {
366 /* save it off for our future deliveries */
367 stcb->asoc.control_pdapi = control;
368 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
372 sctp_add_to_readq(stcb->sctp_ep,
373 stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
376 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
/* MIDDLE/LAST fragment: append to the in-progress entry. */
380 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
381 stcb->asoc.control_pdapi,
382 chk->data, end, chk->rec.data.TSN_seq,
383 &stcb->sctp_socket->so_rcv)) {
385 * something is very wrong, either
386 * control_pdapi is NULL, or the tail_mbuf
387 * is corrupt, or there is a EOM already on
390 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
394 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
395 panic("This should not happen control_pdapi NULL?");
397 /* if we did not panic, it was a EOM */
398 panic("Bad chunking ??");
400 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
401 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
403 SCTP_PRINTF("Bad chunking ??\n");
404 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
412 /* pull it we did it */
413 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
415 * EY this is the chunk that should be tagged nr gapped
416 * calculate the gap and such then tag this TSN nr
417 * chk->rec.data.TSN_seq
420 * EY!-TODO- this tsn should be tagged nr only if it is
421 * out-of-order, the if statement should be modified
423 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
425 nr_tsn = chk->rec.data.TSN_seq;
/* Gap computation handles TSN wrap around MAX_TSN. */
426 if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
427 nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
429 nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
431 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
432 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
434 * EY The 1st should never happen, as in
435 * process_a_data_chunk method this check
439 * EY The 2nd should never happen, because
440 * nr_mapping_array is always expanded when
441 * mapping_array is expanded
444 SCTP_TCB_LOCK_ASSERT(stcb);
445 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
446 if (nr_tsn > asoc->highest_tsn_inside_nr_map)
447 asoc->highest_tsn_inside_nr_map = nr_tsn;
/* LAST fragment ends partial delivery; bump the stream SSN if ordered. */
450 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
451 asoc->fragmented_delivery_inprogress = 0;
452 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
453 asoc->strmin[stream_no].last_sequence_delivered++;
455 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
456 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
458 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
460 * turn the flag back on since we just delivered
463 asoc->fragmented_delivery_inprogress = 1;
465 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
466 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
467 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
468 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
470 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
471 asoc->size_on_reasm_queue -= chk->send_size;
472 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
473 /* free up the chk */
475 sctp_free_a_chunk(stcb, chk);
477 if (asoc->fragmented_delivery_inprogress == 0) {
479 * Now lets see if we can deliver the next one on
482 struct sctp_stream_in *strm;
484 strm = &asoc->strmin[stream_no];
485 nxt_todel = strm->last_sequence_delivered + 1;
486 ctl = TAILQ_FIRST(&strm->inqueue);
487 if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
488 while (ctl != NULL) {
489 /* Deliver more if we can. */
490 if (nxt_todel == ctl->sinfo_ssn) {
491 ctlat = TAILQ_NEXT(ctl, next);
492 TAILQ_REMOVE(&strm->inqueue, ctl, next);
493 asoc->size_on_all_streams -= ctl->length;
494 sctp_ucount_decr(asoc->cnt_on_all_streams);
495 strm->last_sequence_delivered++;
500 nr_tsn = ctl->sinfo_tsn;
501 sctp_add_to_readq(stcb->sctp_ep, stcb,
503 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
505 * EY -now something is
506 * delivered, calculate
507 * nr_gap and tag this tsn
510 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
512 if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
513 nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
515 nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
517 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
518 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
551 SCTP_TCB_LOCK_ASSERT(stcb);
552 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
553 if (nr_tsn > asoc->highest_tsn_inside_nr_map)
554 asoc->highest_tsn_inside_nr_map = nr_tsn;
561 nxt_todel = strm->last_sequence_delivered + 1;
566 /* sa_ignore FREED_MEMORY */
567 chk = TAILQ_FIRST(&asoc->reasmqueue);
572 * Queue the chunk either right into the socket buffer if it is the next one
573 * to go OR put it in the correct place in the delivery queue. If we do
574 * append to the so_buf, keep doing so until we are out of order. One big
575 * question still remains, what to do when the socket buffer is FULL??
/*
 * sctp_queue_data_to_stream() - place an ordered message on its stream.
 * A message whose SSN is at or behind last_sequence_delivered aborts the
 * association (protocol violation, SCTP_LOC_1).  The next-expected SSN
 * is delivered straight to the socket (and any now-in-order successors
 * are drained after it); anything else is inserted SSN-sorted into the
 * stream inqueue, with exact-duplicate SSNs freed.  Delivered TSNs are
 * tagged in the NR-SACK map when the peer supports it.
 * NOTE(review): this listing elides lines (declarations of 'oper',
 * 'nxt_todel', returns, closing braces); do not restructure from this
 * view alone.
 */
578 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
579 struct sctp_queued_to_read *control, int *abort_flag)
582 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
583 * all the data in one stream this could happen quite rapidly. One
584 * could use the TSN to keep track of things, but this scheme breaks
585 * down in the other type of stream useage that could occur. Send a
586 * single msg to stream 0, send 4Billion messages to stream 1, now
587 * send a message to stream 0. You have a situation where the TSN
588 * has wrapped but not in the stream. Is this worth worrying about
589 * or should we just change our queue sort at the bottom to be by
592 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
593 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
594 * assignment this could happen... and I don't see how this would be
595 * a violation. So for now I am undecided an will leave the sort by
596 * SSN alone. Maybe a hybred approach is the answer
599 struct sctp_stream_in *strm;
600 struct sctp_queued_to_read *at;
605 /* EY- will be used to calculate nr-gap for a tsn */
606 uint32_t nr_tsn, nr_gap;
/* Account the message on the stream queues up front. */
609 asoc->size_on_all_streams += control->length;
610 sctp_ucount_incr(asoc->cnt_on_all_streams);
611 strm = &asoc->strmin[control->sinfo_stream];
612 nxt_todel = strm->last_sequence_delivered + 1;
613 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
614 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
616 SCTPDBG(SCTP_DEBUG_INDATA1,
617 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
618 (uint32_t) control->sinfo_stream,
619 (uint32_t) strm->last_sequence_delivered,
620 (uint32_t) nxt_todel);
/* SSN at/behind last delivered => duplicate/stale: abort association. */
621 if (compare_with_wrap(strm->last_sequence_delivered,
622 control->sinfo_ssn, MAX_SEQ) ||
623 (strm->last_sequence_delivered == control->sinfo_ssn)) {
624 /* The incoming sseq is behind where we last delivered? */
625 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
626 control->sinfo_ssn, strm->last_sequence_delivered);
629 * throw it in the stream so it gets cleaned up in
630 * association destruction
632 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
633 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
634 0, M_DONTWAIT, 1, MT_DATA);
636 struct sctp_paramhdr *ph;
639 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
640 (sizeof(uint32_t) * 3);
641 ph = mtod(oper, struct sctp_paramhdr *);
642 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
643 ph->param_length = htons(SCTP_BUF_LEN(oper));
644 ippp = (uint32_t *) (ph + 1);
645 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
647 *ippp = control->sinfo_tsn;
649 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
651 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
652 sctp_abort_an_association(stcb->sctp_ep, stcb,
653 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
/* Exactly the next expected SSN: deliver directly to the socket. */
659 if (nxt_todel == control->sinfo_ssn) {
660 /* can be delivered right away? */
661 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
662 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
664 /* EY it wont be queued if it could be delivered directly */
666 asoc->size_on_all_streams -= control->length;
667 sctp_ucount_decr(asoc->cnt_on_all_streams);
668 strm->last_sequence_delivered++;
669 /* EY will be used to calculate nr-gap */
670 nr_tsn = control->sinfo_tsn;
671 sctp_add_to_readq(stcb->sctp_ep, stcb,
673 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
676 * EY this is the chunk that should be tagged nr gapped
677 * calculate the gap and such then tag this TSN nr
678 * chk->rec.data.TSN_seq
680 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
682 if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
683 nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
685 nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
687 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
688 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
690 * EY The 1st should never happen, as in
691 * process_a_data_chunk method this check
695 * EY The 2nd should never happen, because
696 * nr_mapping_array is always expanded when
697 * mapping_array is expanded
700 SCTP_TCB_LOCK_ASSERT(stcb);
701 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
702 if (nr_tsn > asoc->highest_tsn_inside_nr_map)
703 asoc->highest_tsn_inside_nr_map = nr_tsn;
/* Drain any queued messages that are now in order. */
706 control = TAILQ_FIRST(&strm->inqueue);
707 while (control != NULL) {
709 nxt_todel = strm->last_sequence_delivered + 1;
710 if (nxt_todel == control->sinfo_ssn) {
711 at = TAILQ_NEXT(control, next);
712 TAILQ_REMOVE(&strm->inqueue, control, next);
713 asoc->size_on_all_streams -= control->length;
714 sctp_ucount_decr(asoc->cnt_on_all_streams);
715 strm->last_sequence_delivered++;
717 * We ignore the return of deliver_data here
718 * since we always can hold the chunk on the
719 * d-queue. And we have a finite number that
720 * can be delivered from the strq.
722 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
723 sctp_log_strm_del(control, NULL,
724 SCTP_STR_LOG_FROM_IMMED_DEL);
726 /* EY will be used to calculate nr-gap */
727 nr_tsn = control->sinfo_tsn;
728 sctp_add_to_readq(stcb->sctp_ep, stcb,
730 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
732 * EY this is the chunk that should be
733 * tagged nr gapped calculate the gap and
734 * such then tag this TSN nr
735 * chk->rec.data.TSN_seq
737 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
739 if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
740 nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
742 nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
744 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
745 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
747 * EY The 1st should never
749 * process_a_data_chunk
750 * method this check should
754 * EY The 2nd should never
756 * nr_mapping_array is
757 * always expanded when
758 * mapping_array is expanded
761 SCTP_TCB_LOCK_ASSERT(stcb);
762 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
763 if (nr_tsn > asoc->highest_tsn_inside_nr_map)
764 asoc->highest_tsn_inside_nr_map = nr_tsn;
775 * Ok, we did not deliver this guy, find the correct place
776 * to put it on the queue.
778 if ((compare_with_wrap(asoc->cumulative_tsn,
779 control->sinfo_tsn, MAX_TSN)) ||
780 (control->sinfo_tsn == asoc->cumulative_tsn)) {
783 if (TAILQ_EMPTY(&strm->inqueue)) {
785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
786 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
788 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* SSN-sorted insertion into a non-empty stream queue. */
790 TAILQ_FOREACH(at, &strm->inqueue, next) {
791 if (compare_with_wrap(at->sinfo_ssn,
792 control->sinfo_ssn, MAX_SEQ)) {
794 * one in queue is bigger than the
795 * new one, insert before this one
797 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
798 sctp_log_strm_del(control, at,
799 SCTP_STR_LOG_FROM_INSERT_MD);
801 TAILQ_INSERT_BEFORE(at, control, next);
803 } else if (at->sinfo_ssn == control->sinfo_ssn) {
805 * Gak, He sent me a duplicate str
809 * foo bar, I guess I will just free
810 * this new guy, should we abort
811 * too? FIX ME MAYBE? Or it COULD be
812 * that the SSN's have wrapped.
813 * Maybe I should compare to TSN
814 * somehow... sigh for now just blow
819 sctp_m_freem(control->data);
820 control->data = NULL;
821 asoc->size_on_all_streams -= control->length;
822 sctp_ucount_decr(asoc->cnt_on_all_streams);
823 if (control->whoFrom)
824 sctp_free_remote_addr(control->whoFrom);
825 control->whoFrom = NULL;
826 sctp_free_a_readq(stcb, control);
829 if (TAILQ_NEXT(at, next) == NULL) {
831 * We are at the end, insert
834 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
835 sctp_log_strm_del(control, at,
836 SCTP_STR_LOG_FROM_INSERT_TL);
838 TAILQ_INSERT_AFTER(&strm->inqueue,
849 * Returns two things: You get the total size of the deliverable parts of the
850 * first fragmented message on the reassembly queue. And you get a 1 back if
851 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * sctp_is_all_msg_on_reasm() - walk the head of the reassembly queue,
 * summing send_size into *t_size for consecutive TSNs, and report
 * whether a LAST fragment was reached.
 * NOTE(review): the return type, the '*t_size = 0' init, the 'tsn++'
 * step, and the return statements are elided from this listing.
 */
854 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
856 struct sctp_tmit_chunk *chk;
860 chk = TAILQ_FIRST(&asoc->reasmqueue);
862 /* nothing on the queue */
865 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
866 /* Not a first on the queue */
869 tsn = chk->rec.data.TSN_seq;
/* Stop at the first non-consecutive TSN: message incomplete. */
871 if (tsn != chk->rec.data.TSN_seq) {
874 *t_size += chk->send_size;
875 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
879 chk = TAILQ_NEXT(chk, sctp_next);
/*
 * sctp_deliver_reasm_check() - decide whether reassembly delivery can
 * start or continue.  An empty queue resets the reassembly accounting.
 * If no fragmented delivery is in progress, start one only when the
 * queue head is a deliverable FIRST fragment and either the whole
 * message is present or enough bytes for partial delivery are queued;
 * otherwise keep servicing the in-progress delivery.
 * NOTE(review): several lines are elided (the 'nxt_todel'/'tsize'
 * declarations and some assignments/braces); numbering gaps mark them.
 */
885 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
887 struct sctp_tmit_chunk *chk;
892 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the reassembly accounting. */
895 asoc->size_on_reasm_queue = 0;
896 asoc->cnt_on_reasm_queue = 0;
899 if (asoc->fragmented_delivery_inprogress == 0) {
901 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
902 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
903 (nxt_todel == chk->rec.data.stream_seq ||
904 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
906 * Yep the first one is here and its ok to deliver
/* Start only if whole message present or partial-delivery point hit. */
909 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
910 (tsize >= stcb->sctp_ep->partial_delivery_point))) {
913 * Yes, we setup to start reception, by
914 * backing down the TSN just in case we
915 * can't deliver. If we
917 asoc->fragmented_delivery_inprogress = 1;
918 asoc->tsn_last_delivered =
919 chk->rec.data.TSN_seq - 1;
921 chk->rec.data.stream_number;
922 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
923 asoc->pdapi_ppid = chk->rec.data.payloadtype;
924 asoc->fragment_flags = chk->rec.data.rcv_flags;
925 sctp_service_reassembly(stcb, asoc);
930 * Service re-assembly will deliver stream data queued at
931 * the end of fragmented delivery.. but it wont know to go
932 * back and call itself again... we do that here with the
935 sctp_service_reassembly(stcb, asoc);
936 if (asoc->fragmented_delivery_inprogress == 0) {
938 * finished our Fragmented delivery, could be more
947 * Dump onto the re-assembly queue, in its proper place. After dumping on the
948 * queue, see if anthing can be delivered. If so pull it off (or as much as
949 * we can. If we run out of space then we must dump what we can and set the
950 * appropriate flag to say we queued what we could.
953 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
954 struct sctp_tmit_chunk *chk, int *abort_flag)
957 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
959 struct sctp_tmit_chunk *at, *prev, *next;
962 cum_ackp1 = asoc->tsn_last_delivered + 1;
963 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
964 /* This is the first one on the queue */
965 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
967 * we do not check for delivery of anything when only one
970 asoc->size_on_reasm_queue = chk->send_size;
971 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
972 if (chk->rec.data.TSN_seq == cum_ackp1) {
973 if (asoc->fragmented_delivery_inprogress == 0 &&
974 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
975 SCTP_DATA_FIRST_FRAG) {
977 * An empty queue, no delivery inprogress,
978 * we hit the next one and it does NOT have
979 * a FIRST fragment mark.
981 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
982 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
983 0, M_DONTWAIT, 1, MT_DATA);
986 struct sctp_paramhdr *ph;
990 sizeof(struct sctp_paramhdr) +
991 (sizeof(uint32_t) * 3);
992 ph = mtod(oper, struct sctp_paramhdr *);
994 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
995 ph->param_length = htons(SCTP_BUF_LEN(oper));
996 ippp = (uint32_t *) (ph + 1);
997 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
999 *ippp = chk->rec.data.TSN_seq;
1001 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1004 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
1005 sctp_abort_an_association(stcb->sctp_ep, stcb,
1006 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1008 } else if (asoc->fragmented_delivery_inprogress &&
1009 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1011 * We are doing a partial delivery and the
1012 * NEXT chunk MUST be either the LAST or
1013 * MIDDLE fragment NOT a FIRST
1015 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
1016 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1017 0, M_DONTWAIT, 1, MT_DATA);
1019 struct sctp_paramhdr *ph;
1022 SCTP_BUF_LEN(oper) =
1023 sizeof(struct sctp_paramhdr) +
1024 (3 * sizeof(uint32_t));
1025 ph = mtod(oper, struct sctp_paramhdr *);
1027 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1028 ph->param_length = htons(SCTP_BUF_LEN(oper));
1029 ippp = (uint32_t *) (ph + 1);
1030 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
1032 *ippp = chk->rec.data.TSN_seq;
1034 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1036 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
1037 sctp_abort_an_association(stcb->sctp_ep, stcb,
1038 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1040 } else if (asoc->fragmented_delivery_inprogress) {
1042 * Here we are ok with a MIDDLE or LAST
1045 if (chk->rec.data.stream_number !=
1046 asoc->str_of_pdapi) {
1047 /* Got to be the right STR No */
1048 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
1049 chk->rec.data.stream_number,
1050 asoc->str_of_pdapi);
1051 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1052 0, M_DONTWAIT, 1, MT_DATA);
1054 struct sctp_paramhdr *ph;
1057 SCTP_BUF_LEN(oper) =
1058 sizeof(struct sctp_paramhdr) +
1059 (sizeof(uint32_t) * 3);
1061 struct sctp_paramhdr *);
1063 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1065 htons(SCTP_BUF_LEN(oper));
1066 ippp = (uint32_t *) (ph + 1);
1067 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
1069 *ippp = chk->rec.data.TSN_seq;
1071 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1073 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
1074 sctp_abort_an_association(stcb->sctp_ep,
1075 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1077 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
1078 SCTP_DATA_UNORDERED &&
1079 chk->rec.data.stream_seq !=
1080 asoc->ssn_of_pdapi) {
1081 /* Got to be the right STR Seq */
1082 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
1083 chk->rec.data.stream_seq,
1084 asoc->ssn_of_pdapi);
1085 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1086 0, M_DONTWAIT, 1, MT_DATA);
1088 struct sctp_paramhdr *ph;
1091 SCTP_BUF_LEN(oper) =
1092 sizeof(struct sctp_paramhdr) +
1093 (3 * sizeof(uint32_t));
1095 struct sctp_paramhdr *);
1097 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1099 htons(SCTP_BUF_LEN(oper));
1100 ippp = (uint32_t *) (ph + 1);
1101 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1103 *ippp = chk->rec.data.TSN_seq;
1105 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1108 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1109 sctp_abort_an_association(stcb->sctp_ep,
1110 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1117 /* Find its place */
1118 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1119 if (compare_with_wrap(at->rec.data.TSN_seq,
1120 chk->rec.data.TSN_seq, MAX_TSN)) {
1122 * one in queue is bigger than the new one, insert
1126 asoc->size_on_reasm_queue += chk->send_size;
1127 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1129 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1131 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1132 /* Gak, He sent me a duplicate str seq number */
1134 * foo bar, I guess I will just free this new guy,
1135 * should we abort too? FIX ME MAYBE? Or it COULD be
1136 * that the SSN's have wrapped. Maybe I should
1137 * compare to TSN somehow... sigh for now just blow
1141 sctp_m_freem(chk->data);
1144 sctp_free_a_chunk(stcb, chk);
1147 last_flags = at->rec.data.rcv_flags;
1148 last_tsn = at->rec.data.TSN_seq;
1150 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1152 * We are at the end, insert it after this
1155 /* check it first */
1156 asoc->size_on_reasm_queue += chk->send_size;
1157 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1158 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1163 /* Now the audits */
1165 prev_tsn = chk->rec.data.TSN_seq - 1;
1166 if (prev_tsn == prev->rec.data.TSN_seq) {
1168 * Ok the one I am dropping onto the end is the
1169 * NEXT. A bit of valdiation here.
1171 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1172 SCTP_DATA_FIRST_FRAG ||
1173 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1174 SCTP_DATA_MIDDLE_FRAG) {
1176 * Insert chk MUST be a MIDDLE or LAST
1179 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1180 SCTP_DATA_FIRST_FRAG) {
1181 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1182 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1183 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1184 0, M_DONTWAIT, 1, MT_DATA);
1186 struct sctp_paramhdr *ph;
1189 SCTP_BUF_LEN(oper) =
1190 sizeof(struct sctp_paramhdr) +
1191 (3 * sizeof(uint32_t));
1193 struct sctp_paramhdr *);
1195 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1197 htons(SCTP_BUF_LEN(oper));
1198 ippp = (uint32_t *) (ph + 1);
1199 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1201 *ippp = chk->rec.data.TSN_seq;
1203 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1206 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1207 sctp_abort_an_association(stcb->sctp_ep,
1208 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1212 if (chk->rec.data.stream_number !=
1213 prev->rec.data.stream_number) {
1215 * Huh, need the correct STR here,
1216 * they must be the same.
1218 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1219 chk->rec.data.stream_number,
1220 prev->rec.data.stream_number);
1221 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1222 0, M_DONTWAIT, 1, MT_DATA);
1224 struct sctp_paramhdr *ph;
1227 SCTP_BUF_LEN(oper) =
1228 sizeof(struct sctp_paramhdr) +
1229 (3 * sizeof(uint32_t));
1231 struct sctp_paramhdr *);
1233 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1235 htons(SCTP_BUF_LEN(oper));
1236 ippp = (uint32_t *) (ph + 1);
1237 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1239 *ippp = chk->rec.data.TSN_seq;
1241 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1243 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1244 sctp_abort_an_association(stcb->sctp_ep,
1245 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1250 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1251 chk->rec.data.stream_seq !=
1252 prev->rec.data.stream_seq) {
1254 * Huh, need the correct STR here,
1255 * they must be the same.
1257 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1258 chk->rec.data.stream_seq,
1259 prev->rec.data.stream_seq);
1260 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1261 0, M_DONTWAIT, 1, MT_DATA);
1263 struct sctp_paramhdr *ph;
1266 SCTP_BUF_LEN(oper) =
1267 sizeof(struct sctp_paramhdr) +
1268 (3 * sizeof(uint32_t));
1270 struct sctp_paramhdr *);
1272 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1274 htons(SCTP_BUF_LEN(oper));
1275 ippp = (uint32_t *) (ph + 1);
1276 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1278 *ippp = chk->rec.data.TSN_seq;
1280 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1282 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1283 sctp_abort_an_association(stcb->sctp_ep,
1284 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1289 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1290 SCTP_DATA_LAST_FRAG) {
1291 /* Insert chk MUST be a FIRST */
1292 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1293 SCTP_DATA_FIRST_FRAG) {
1294 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1295 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1296 0, M_DONTWAIT, 1, MT_DATA);
1298 struct sctp_paramhdr *ph;
1301 SCTP_BUF_LEN(oper) =
1302 sizeof(struct sctp_paramhdr) +
1303 (3 * sizeof(uint32_t));
1305 struct sctp_paramhdr *);
1307 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1309 htons(SCTP_BUF_LEN(oper));
1310 ippp = (uint32_t *) (ph + 1);
1311 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1313 *ippp = chk->rec.data.TSN_seq;
1315 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1318 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1319 sctp_abort_an_association(stcb->sctp_ep,
1320 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1329 post_tsn = chk->rec.data.TSN_seq + 1;
1330 if (post_tsn == next->rec.data.TSN_seq) {
1332 * Ok the one I am inserting ahead of is my NEXT
1333 * one. A bit of valdiation here.
1335 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1336 /* Insert chk MUST be a last fragment */
1337 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1338 != SCTP_DATA_LAST_FRAG) {
1339 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1340 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1341 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1342 0, M_DONTWAIT, 1, MT_DATA);
1344 struct sctp_paramhdr *ph;
1347 SCTP_BUF_LEN(oper) =
1348 sizeof(struct sctp_paramhdr) +
1349 (3 * sizeof(uint32_t));
1351 struct sctp_paramhdr *);
1353 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1355 htons(SCTP_BUF_LEN(oper));
1356 ippp = (uint32_t *) (ph + 1);
1357 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1359 *ippp = chk->rec.data.TSN_seq;
1361 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1363 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1364 sctp_abort_an_association(stcb->sctp_ep,
1365 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1370 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1371 SCTP_DATA_MIDDLE_FRAG ||
1372 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1373 SCTP_DATA_LAST_FRAG) {
1375 * Insert chk CAN be MIDDLE or FIRST NOT
1378 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1379 SCTP_DATA_LAST_FRAG) {
1380 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1381 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1382 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1383 0, M_DONTWAIT, 1, MT_DATA);
1385 struct sctp_paramhdr *ph;
1388 SCTP_BUF_LEN(oper) =
1389 sizeof(struct sctp_paramhdr) +
1390 (3 * sizeof(uint32_t));
1392 struct sctp_paramhdr *);
1394 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1396 htons(SCTP_BUF_LEN(oper));
1397 ippp = (uint32_t *) (ph + 1);
1398 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1400 *ippp = chk->rec.data.TSN_seq;
1402 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1405 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1406 sctp_abort_an_association(stcb->sctp_ep,
1407 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1412 if (chk->rec.data.stream_number !=
1413 next->rec.data.stream_number) {
1415 * Huh, need the correct STR here,
1416 * they must be the same.
1418 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1419 chk->rec.data.stream_number,
1420 next->rec.data.stream_number);
1421 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1422 0, M_DONTWAIT, 1, MT_DATA);
1424 struct sctp_paramhdr *ph;
1427 SCTP_BUF_LEN(oper) =
1428 sizeof(struct sctp_paramhdr) +
1429 (3 * sizeof(uint32_t));
1431 struct sctp_paramhdr *);
1433 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1435 htons(SCTP_BUF_LEN(oper));
1436 ippp = (uint32_t *) (ph + 1);
1437 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1439 *ippp = chk->rec.data.TSN_seq;
1441 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1444 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1445 sctp_abort_an_association(stcb->sctp_ep,
1446 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1451 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1452 chk->rec.data.stream_seq !=
1453 next->rec.data.stream_seq) {
1455 * Huh, need the correct STR here,
1456 * they must be the same.
1458 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1459 chk->rec.data.stream_seq,
1460 next->rec.data.stream_seq);
1461 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1462 0, M_DONTWAIT, 1, MT_DATA);
1464 struct sctp_paramhdr *ph;
1467 SCTP_BUF_LEN(oper) =
1468 sizeof(struct sctp_paramhdr) +
1469 (3 * sizeof(uint32_t));
1471 struct sctp_paramhdr *);
1473 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1475 htons(SCTP_BUF_LEN(oper));
1476 ippp = (uint32_t *) (ph + 1);
1477 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1479 *ippp = chk->rec.data.TSN_seq;
1481 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1483 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1484 sctp_abort_an_association(stcb->sctp_ep,
1485 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1493 /* Do we need to do some delivery? check */
1494 sctp_deliver_reasm_check(stcb, asoc);
1498 * This is an unfortunate routine. It checks to make sure an evil guy is not
1499 * stuffing us full of bad packet fragments. A broken peer could also do this,
1500 * but this is doubtful. It is too bad I must worry about evil crackers, sigh
1504 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1507 struct sctp_tmit_chunk *at;
1510 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1511 if (compare_with_wrap(TSN_seq,
1512 at->rec.data.TSN_seq, MAX_TSN)) {
1513 /* is it one bigger? */
1514 tsn_est = at->rec.data.TSN_seq + 1;
1515 if (tsn_est == TSN_seq) {
1516 /* yep. It better be a last then */
1517 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1518 SCTP_DATA_LAST_FRAG) {
1520 * Ok this guy belongs next to a guy
1521 * that is NOT last, it should be a
1522 * middle/last, not a complete
1528 * This guy is ok since its a LAST
1529 * and the new chunk is a fully
1530 * self- contained one.
1535 } else if (TSN_seq == at->rec.data.TSN_seq) {
1536 /* Software error since I have a dup? */
1540 * Ok, 'at' is larger than new chunk but does it
1541 * need to be right before it.
1543 tsn_est = TSN_seq + 1;
1544 if (tsn_est == at->rec.data.TSN_seq) {
1545 /* Yep, It better be a first */
1546 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1547 SCTP_DATA_FIRST_FRAG) {
1560 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1561 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1562 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1563 int *break_flag, int last_chunk)
1565 /* Process a data chunk */
1566 /* struct sctp_tmit_chunk *chk; */
1567 struct sctp_tmit_chunk *chk;
1570 /* EY - for nr_sack */
1574 int need_reasm_check = 0;
1575 uint16_t strmno, strmseq;
1577 struct sctp_queued_to_read *control;
1579 uint32_t protocol_id;
1580 uint8_t chunk_flags;
1581 struct sctp_stream_reset_list *liste;
1584 tsn = ntohl(ch->dp.tsn);
1585 chunk_flags = ch->ch.chunk_flags;
1586 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1587 asoc->send_sack = 1;
1589 protocol_id = ch->dp.protocol_id;
1590 ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1591 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1592 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1597 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1598 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1599 asoc->cumulative_tsn == tsn) {
1600 /* It is a duplicate */
1601 SCTP_STAT_INCR(sctps_recvdupdata);
1602 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1603 /* Record a dup for the next outbound sack */
1604 asoc->dup_tsns[asoc->numduptsns] = tsn;
1607 asoc->send_sack = 1;
1610 /* Calculate the number of TSN's between the base and this TSN */
1611 if (tsn >= asoc->mapping_array_base_tsn) {
1612 gap = tsn - asoc->mapping_array_base_tsn;
1614 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1616 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1617 /* Can't hold the bit in the mapping at max array, toss it */
1620 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1621 SCTP_TCB_LOCK_ASSERT(stcb);
1622 if (sctp_expand_mapping_array(asoc, gap)) {
1623 /* Can't expand, drop it */
1627 /* EY - for nr_sack */
1630 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1633 /* See if we have received this one already */
1634 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1635 SCTP_STAT_INCR(sctps_recvdupdata);
1636 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1637 /* Record a dup for the next outbound sack */
1638 asoc->dup_tsns[asoc->numduptsns] = tsn;
1641 asoc->send_sack = 1;
1645 * Check to see about the GONE flag, duplicates would cause a sack
1646 * to be sent up above
1648 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1649 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1650 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1653 * wait a minute, this guy is gone, there is no longer a
1654 * receiver. Send peer an ABORT!
1656 struct mbuf *op_err;
1658 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1659 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1664 * Now before going further we see if there is room. If NOT then we
1665 * MAY let one through only IF this TSN is the one we are waiting
1666 * for on a partial delivery API.
1669 /* now do the tests */
1670 if (((asoc->cnt_on_all_streams +
1671 asoc->cnt_on_reasm_queue +
1672 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1673 (((int)asoc->my_rwnd) <= 0)) {
1675 * When we have NO room in the rwnd we check to make sure
1676 * the reader is doing its job...
1678 if (stcb->sctp_socket->so_rcv.sb_cc) {
1679 /* some to read, wake-up */
1680 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1683 so = SCTP_INP_SO(stcb->sctp_ep);
1684 atomic_add_int(&stcb->asoc.refcnt, 1);
1685 SCTP_TCB_UNLOCK(stcb);
1686 SCTP_SOCKET_LOCK(so, 1);
1687 SCTP_TCB_LOCK(stcb);
1688 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1689 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1690 /* assoc was freed while we were unlocked */
1691 SCTP_SOCKET_UNLOCK(so, 1);
1695 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1696 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1697 SCTP_SOCKET_UNLOCK(so, 1);
1700 /* now is it in the mapping array of what we have accepted? */
1701 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1702 /* Nope not in the valid range dump it */
1703 sctp_set_rwnd(stcb, asoc);
1704 if ((asoc->cnt_on_all_streams +
1705 asoc->cnt_on_reasm_queue +
1706 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1707 SCTP_STAT_INCR(sctps_datadropchklmt);
1709 SCTP_STAT_INCR(sctps_datadroprwnd);
1716 strmno = ntohs(ch->dp.stream_id);
1717 if (strmno >= asoc->streamincnt) {
1718 struct sctp_paramhdr *phdr;
1721 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1722 0, M_DONTWAIT, 1, MT_DATA);
1724 /* add some space up front so prepend will work well */
1725 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1726 phdr = mtod(mb, struct sctp_paramhdr *);
1728 * Error causes are just param's and this one has
1729 * two back to back phdr, one with the error type
1730 * and size, the other with the streamid and a rsvd
1732 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1733 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1734 phdr->param_length =
1735 htons(sizeof(struct sctp_paramhdr) * 2);
1737 /* We insert the stream in the type field */
1738 phdr->param_type = ch->dp.stream_id;
1739 /* And set the length to 0 for the rsvd field */
1740 phdr->param_length = 0;
1741 sctp_queue_op_err(stcb, mb);
1743 SCTP_STAT_INCR(sctps_badsid);
1744 SCTP_TCB_LOCK_ASSERT(stcb);
1745 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1746 /* EY set this tsn present in nr_sack's nr_mapping_array */
1747 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1748 SCTP_TCB_LOCK_ASSERT(stcb);
1749 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1751 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1752 /* we have a new high score */
1753 asoc->highest_tsn_inside_map = tsn;
1754 /* EY nr_sack version of the above */
1755 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
1756 asoc->highest_tsn_inside_nr_map = tsn;
1757 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1758 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1761 if (tsn == (asoc->cumulative_tsn + 1)) {
1762 /* Update cum-ack */
1763 asoc->cumulative_tsn = tsn;
1768 * Before we continue lets validate that we are not being fooled by
1769 * an evil attacker. We can only have 4k chunks based on our TSN
1770 * spread allowed by the mapping array 512 * 8 bits, so there is no
1771 * way our stream sequence numbers could have wrapped. We of course
1772 * only validate the FIRST fragment so the bit must be set.
1774 strmseq = ntohs(ch->dp.stream_sequence);
1775 #ifdef SCTP_ASOCLOG_OF_TSNS
1776 SCTP_TCB_LOCK_ASSERT(stcb);
1777 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1778 asoc->tsn_in_at = 0;
1779 asoc->tsn_in_wrapped = 1;
1781 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1782 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1783 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1784 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1785 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1786 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1787 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1788 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1791 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1792 (TAILQ_EMPTY(&asoc->resetHead)) &&
1793 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1794 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1795 strmseq, MAX_SEQ) ||
1796 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1797 /* The incoming sseq is behind where we last delivered? */
1798 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1799 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1800 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1801 0, M_DONTWAIT, 1, MT_DATA);
1803 struct sctp_paramhdr *ph;
1806 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1807 (3 * sizeof(uint32_t));
1808 ph = mtod(oper, struct sctp_paramhdr *);
1809 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1810 ph->param_length = htons(SCTP_BUF_LEN(oper));
1811 ippp = (uint32_t *) (ph + 1);
1812 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1816 *ippp = ((strmno << 16) | strmseq);
1819 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1820 sctp_abort_an_association(stcb->sctp_ep, stcb,
1821 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1825 /************************************
1826 * From here down we may find ch-> invalid
1827 * so its a good idea NOT to use it.
1828 *************************************/
1830 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1831 if (last_chunk == 0) {
1832 dmbuf = SCTP_M_COPYM(*m,
1833 (offset + sizeof(struct sctp_data_chunk)),
1834 the_len, M_DONTWAIT);
1835 #ifdef SCTP_MBUF_LOGGING
1836 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1841 if (SCTP_BUF_IS_EXTENDED(mat)) {
1842 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1844 mat = SCTP_BUF_NEXT(mat);
1849 /* We can steal the last chunk */
1853 /* lop off the top part */
1854 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1855 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1856 l_len = SCTP_BUF_LEN(dmbuf);
1859 * need to count up the size hopefully does not hit
1867 l_len += SCTP_BUF_LEN(lat);
1868 lat = SCTP_BUF_NEXT(lat);
1871 if (l_len > the_len) {
1872 /* Trim the end round bytes off too */
1873 m_adj(dmbuf, -(l_len - the_len));
1876 if (dmbuf == NULL) {
1877 SCTP_STAT_INCR(sctps_nomem);
1880 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1881 asoc->fragmented_delivery_inprogress == 0 &&
1882 TAILQ_EMPTY(&asoc->resetHead) &&
1884 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1885 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1886 /* Candidate for express delivery */
1888 * Its not fragmented, No PD-API is up, Nothing in the
1889 * delivery queue, Its un-ordered OR ordered and the next to
1890 * deliver AND nothing else is stuck on the stream queue,
1891 * And there is room for it in the socket buffer. Lets just
1892 * stuff it up the buffer....
1895 /* It would be nice to avoid this copy if we could :< */
1896 sctp_alloc_a_readq(stcb, control);
1897 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1903 if (control == NULL) {
1904 goto failed_express_del;
1906 sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1909 * EY here I should check if this delivered tsn is
1910 * out_of_order, if yes then update the nr_map
1912 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1914 * EY check if the mapping_array and nr_mapping
1915 * array are consistent
1917 if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
1920 * sctp_process_a_data_chunk(5): Something
1921 * is wrong the map base tsn" "\nEY-and
1922 * nr_map base tsn should be equal.");
1924 /* EY debugging block */
1927 * printf("\nEY-Calculating an
1928 * nr_gap!!\nmapping_array_size = %d
1929 * nr_mapping_array_size = %d"
1930 * "\nEY-mapping_array_base = %d
1931 * nr_mapping_array_base =
1932 * %d\nEY-highest_tsn_inside_map = %d"
1933 * "highest_tsn_inside_nr_map = %d\nEY-TSN =
1934 * %d nr_gap = %d",asoc->mapping_array_size,
1935 * asoc->nr_mapping_array_size,
1936 * asoc->mapping_array_base_tsn,
1937 * asoc->nr_mapping_array_base_tsn,
1938 * asoc->highest_tsn_inside_map,
1939 * asoc->highest_tsn_inside_nr_map,tsn,nr_gap
1943 /* EY - not %100 sure about the lock thing */
1944 SCTP_TCB_LOCK_ASSERT(stcb);
1945 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
1946 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
1947 asoc->highest_tsn_inside_nr_map = tsn;
1949 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1950 /* for ordered, bump what we delivered */
1951 asoc->strmin[strmno].last_sequence_delivered++;
1953 SCTP_STAT_INCR(sctps_recvexpress);
1954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1955 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1956 SCTP_STR_LOG_FROM_EXPRS_DEL);
1959 goto finish_express_del;
1962 /* If we reach here this is a new chunk */
1965 /* Express for fragmented delivery? */
1966 if ((asoc->fragmented_delivery_inprogress) &&
1967 (stcb->asoc.control_pdapi) &&
1968 (asoc->str_of_pdapi == strmno) &&
1969 (asoc->ssn_of_pdapi == strmseq)
1971 control = stcb->asoc.control_pdapi;
1972 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1973 /* Can't be another first? */
1974 goto failed_pdapi_express_del;
1976 if (tsn == (control->sinfo_tsn + 1)) {
1977 /* Yep, we can add it on */
1981 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1984 cumack = asoc->cumulative_tsn;
1985 if ((cumack + 1) == tsn)
1988 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1990 &stcb->sctp_socket->so_rcv)) {
1991 SCTP_PRINTF("Append fails end:%d\n", end);
1992 goto failed_pdapi_express_del;
1995 * EY It is appended to the read queue in prev if
1996 * block here I should check if this delivered tsn
1997 * is out_of_order, if yes then update the nr_map
1999 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2000 /* EY debugging block */
2003 * printf("\nEY-Calculating an
2004 * nr_gap!!\nEY-mapping_array_size =
2005 * %d nr_mapping_array_size = %d"
2006 * "\nEY-mapping_array_base = %d
2007 * nr_mapping_array_base =
2008 * %d\nEY-highest_tsn_inside_map =
2009 * %d" "highest_tsn_inside_nr_map =
2010 * %d\nEY-TSN = %d nr_gap =
2011 * %d",asoc->mapping_array_size,
2012 * asoc->nr_mapping_array_size,
2013 * asoc->mapping_array_base_tsn,
2014 * asoc->nr_mapping_array_base_tsn,
2015 * asoc->highest_tsn_inside_map,
2016 * asoc->highest_tsn_inside_nr_map,ts
2020 /* EY - not %100 sure about the lock thing */
2021 SCTP_TCB_LOCK_ASSERT(stcb);
2022 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2023 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2024 asoc->highest_tsn_inside_nr_map = tsn;
2026 SCTP_STAT_INCR(sctps_recvexpressm);
2027 control->sinfo_tsn = tsn;
2028 asoc->tsn_last_delivered = tsn;
2029 asoc->fragment_flags = chunk_flags;
2030 asoc->tsn_of_pdapi_last_delivered = tsn;
2031 asoc->last_flags_delivered = chunk_flags;
2032 asoc->last_strm_seq_delivered = strmseq;
2033 asoc->last_strm_no_delivered = strmno;
2035 /* clean up the flags and such */
2036 asoc->fragmented_delivery_inprogress = 0;
2037 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2038 asoc->strmin[strmno].last_sequence_delivered++;
2040 stcb->asoc.control_pdapi = NULL;
2041 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
2043 * There could be another message
2046 need_reasm_check = 1;
2050 goto finish_express_del;
2053 failed_pdapi_express_del:
2055 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2056 sctp_alloc_a_chunk(stcb, chk);
2058 /* No memory so we drop the chunk */
2059 SCTP_STAT_INCR(sctps_nomem);
2060 if (last_chunk == 0) {
2061 /* we copied it, free the copy */
2062 sctp_m_freem(dmbuf);
2066 chk->rec.data.TSN_seq = tsn;
2067 chk->no_fr_allowed = 0;
2068 chk->rec.data.stream_seq = strmseq;
2069 chk->rec.data.stream_number = strmno;
2070 chk->rec.data.payloadtype = protocol_id;
2071 chk->rec.data.context = stcb->asoc.context;
2072 chk->rec.data.doing_fast_retransmit = 0;
2073 chk->rec.data.rcv_flags = chunk_flags;
2075 chk->send_size = the_len;
2077 atomic_add_int(&net->ref_count, 1);
2080 sctp_alloc_a_readq(stcb, control);
2081 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2087 if (control == NULL) {
2088 /* No memory so we drop the chunk */
2089 SCTP_STAT_INCR(sctps_nomem);
2090 if (last_chunk == 0) {
2091 /* we copied it, free the copy */
2092 sctp_m_freem(dmbuf);
2096 control->length = the_len;
2099 /* Mark it as received */
2100 /* Now queue it where it belongs */
2101 if (control != NULL) {
2102 /* First a sanity check */
2103 if (asoc->fragmented_delivery_inprogress) {
2105 * Ok, we have a fragmented delivery in progress if
2106 * this chunk is next to deliver OR belongs in our
2107 * view to the reassembly, the peer is evil or
2110 uint32_t estimate_tsn;
2112 estimate_tsn = asoc->tsn_last_delivered + 1;
2113 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2114 (estimate_tsn == control->sinfo_tsn)) {
2115 /* Evil/Broke peer */
2116 sctp_m_freem(control->data);
2117 control->data = NULL;
2118 if (control->whoFrom) {
2119 sctp_free_remote_addr(control->whoFrom);
2120 control->whoFrom = NULL;
2122 sctp_free_a_readq(stcb, control);
2123 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2124 0, M_DONTWAIT, 1, MT_DATA);
2126 struct sctp_paramhdr *ph;
2129 SCTP_BUF_LEN(oper) =
2130 sizeof(struct sctp_paramhdr) +
2131 (3 * sizeof(uint32_t));
2132 ph = mtod(oper, struct sctp_paramhdr *);
2134 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2135 ph->param_length = htons(SCTP_BUF_LEN(oper));
2136 ippp = (uint32_t *) (ph + 1);
2137 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
2141 *ippp = ((strmno << 16) | strmseq);
2143 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
2144 sctp_abort_an_association(stcb->sctp_ep, stcb,
2145 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2150 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2151 sctp_m_freem(control->data);
2152 control->data = NULL;
2153 if (control->whoFrom) {
2154 sctp_free_remote_addr(control->whoFrom);
2155 control->whoFrom = NULL;
2157 sctp_free_a_readq(stcb, control);
2159 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2160 0, M_DONTWAIT, 1, MT_DATA);
2162 struct sctp_paramhdr *ph;
2165 SCTP_BUF_LEN(oper) =
2166 sizeof(struct sctp_paramhdr) +
2167 (3 * sizeof(uint32_t));
2169 struct sctp_paramhdr *);
2171 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2173 htons(SCTP_BUF_LEN(oper));
2174 ippp = (uint32_t *) (ph + 1);
2175 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2179 *ippp = ((strmno << 16) | strmseq);
2181 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2182 sctp_abort_an_association(stcb->sctp_ep,
2183 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2190 /* No PDAPI running */
2191 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2193 * Reassembly queue is NOT empty validate
2194 * that this tsn does not need to be in
2195 * reasembly queue. If it does then our peer
2196 * is broken or evil.
2198 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2199 sctp_m_freem(control->data);
2200 control->data = NULL;
2201 if (control->whoFrom) {
2202 sctp_free_remote_addr(control->whoFrom);
2203 control->whoFrom = NULL;
2205 sctp_free_a_readq(stcb, control);
2206 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2207 0, M_DONTWAIT, 1, MT_DATA);
2209 struct sctp_paramhdr *ph;
2212 SCTP_BUF_LEN(oper) =
2213 sizeof(struct sctp_paramhdr) +
2214 (3 * sizeof(uint32_t));
2216 struct sctp_paramhdr *);
2218 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2220 htons(SCTP_BUF_LEN(oper));
2221 ippp = (uint32_t *) (ph + 1);
2222 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2226 *ippp = ((strmno << 16) | strmseq);
2228 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2229 sctp_abort_an_association(stcb->sctp_ep,
2230 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2237 /* ok, if we reach here we have passed the sanity checks */
2238 if (chunk_flags & SCTP_DATA_UNORDERED) {
2239 /* queue directly into socket buffer */
2240 sctp_add_to_readq(stcb->sctp_ep, stcb,
2242 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2245 * EY It is added to the read queue in prev if block
2246 * here I should check if this delivered tsn is
2247 * out_of_order, if yes then update the nr_map
2249 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2251 * EY check if the mapping_array and
2252 * nr_mapping array are consistent
2254 if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
2257 * sctp_process_a_data_chunk(6):
2258 * Something is wrong the map base
2259 * tsn" "\nEY-and nr_map base tsn
2260 * should be equal.");
2263 * EY - not %100 sure about the lock
2264 * thing, i think we don't need the
2267 /* SCTP_TCB_LOCK_ASSERT(stcb); */
2270 * printf("\nEY-Calculating an
2271 * nr_gap!!\nEY-mapping_array_size =
2272 * %d nr_mapping_array_size = %d"
2273 * "\nEY-mapping_array_base = %d
2274 * nr_mapping_array_base =
2275 * %d\nEY-highest_tsn_inside_map =
2276 * %d" "highest_tsn_inside_nr_map =
2277 * %d\nEY-TSN = %d nr_gap =
2278 * %d",asoc->mapping_array_size,
2279 * asoc->nr_mapping_array_size,
2280 * asoc->mapping_array_base_tsn,
2281 * asoc->nr_mapping_array_base_tsn,
2282 * asoc->highest_tsn_inside_map,
2283 * asoc->highest_tsn_inside_nr_map,ts
2287 SCTP_TCB_LOCK_ASSERT(stcb);
2288 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2289 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2290 asoc->highest_tsn_inside_nr_map = tsn;
2294 * Special check for when streams are resetting. We
2295 * could be more smart about this and check the
2296 * actual stream to see if it is not being reset..
2297 * that way we would not create a HOLB when amongst
2298 * streams being reset and those not being reset.
2300 * We take complete messages that have a stream reset
2301 * intervening (aka the TSN is after where our
2302 * cum-ack needs to be) off and put them on a
2303 * pending_reply_queue. The reassembly ones we do
2304 * not have to worry about since they are all sorted
2305 * and proceessed by TSN order. It is only the
2306 * singletons I must worry about.
2308 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2309 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2312 * yep its past where we need to reset... go
2313 * ahead and queue it.
2315 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2317 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2319 struct sctp_queued_to_read *ctlOn;
2320 unsigned char inserted = 0;
2322 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2324 if (compare_with_wrap(control->sinfo_tsn,
2325 ctlOn->sinfo_tsn, MAX_TSN)) {
2326 ctlOn = TAILQ_NEXT(ctlOn, next);
2329 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2334 if (inserted == 0) {
2336 * must be put at end, use
2337 * prevP (all setup from
2338 * loop) to setup nextP.
2340 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2344 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2351 /* Into the re-assembly queue */
2352 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2355 * the assoc is now gone and chk was put onto the
2356 * reasm queue, which has all been freed.
2363 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2364 /* we have a new high score */
2365 asoc->highest_tsn_inside_map = tsn;
2366 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2367 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2370 if (tsn == (asoc->cumulative_tsn + 1)) {
2371 /* Update cum-ack */
2372 asoc->cumulative_tsn = tsn;
2378 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2380 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2382 SCTP_STAT_INCR(sctps_recvdata);
2383 /* Set it present please */
2384 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2385 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2389 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2391 SCTP_TCB_LOCK_ASSERT(stcb);
2392 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2393 /* check the special flag for stream resets */
2394 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2395 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2396 (asoc->cumulative_tsn == liste->tsn))
2399 * we have finished working through the backlogged TSN's now
2400 * time to reset streams. 1: call reset function. 2: free
2401 * pending_reply space 3: distribute any chunks in
2402 * pending_reply_queue.
2404 struct sctp_queued_to_read *ctl;
2406 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2407 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2408 SCTP_FREE(liste, SCTP_M_STRESET);
2409 /* sa_ignore FREED_MEMORY */
2410 liste = TAILQ_FIRST(&asoc->resetHead);
2411 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2412 if (ctl && (liste == NULL)) {
2413 /* All can be removed */
2415 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2416 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2420 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2423 /* more than one in queue */
2424 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2426 * if ctl->sinfo_tsn is <= liste->tsn we can
2427 * process it which is the NOT of
2428 * ctl->sinfo_tsn > liste->tsn
2430 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2431 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2435 ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2439 * Now service re-assembly to pick up anything that has been
2440 * held on reassembly queue?
2442 sctp_deliver_reasm_check(stcb, asoc);
2443 need_reasm_check = 0;
2445 if (need_reasm_check) {
2446 /* Another one waits ? */
2447 sctp_deliver_reasm_check(stcb, asoc);
/*
 * Lookup table used when scanning a SACK mapping array byte-by-byte:
 * for a byte value b, sctp_map_lookup_tab[b] is the number of
 * consecutive 1-bits starting at the least-significant bit, minus one.
 * A value of -1 means bit 0 is clear (the first TSN covered by this
 * byte has not been received); 7 means all eight TSNs in the byte
 * have arrived.  Callers (e.g. sctp_sack_check) accumulate these
 * values to locate the new cumulative-TSN point.
 */
int8_t sctp_map_lookup_tab[256] = {
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 6,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 7,
};
/*
 * sctp_sack_check() - after DATA has been absorbed, (1) walk the
 * mapping array to see whether the cumulative-TSN point advanced,
 * (2) clear or slide the mapping array (and, when nr-sack is enabled,
 * the nr_mapping array) down past fully-acked bytes, and (3) decide
 * whether to emit a SACK/NR-SACK immediately or just (re)start the
 * delayed-ack timer, honoring the CMT DAC algorithm.
 *
 * NOTE(review): this block is a lossy extraction -- each line carries
 * a stray leading file line number and several lines (braces, local
 * declarations such as 'at'/'nr_at'/'clr'/'is_a_gap') were dropped.
 * Only comments were added here; restore from a pristine
 * sctp_indata.c before building.
 */
2489 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2492 * Now we also need to check the mapping array in a couple of ways.
2493 * 1) Did we move the cum-ack point?
2495 struct sctp_association *asoc;
2497 int last_all_ones = 0;
2498 int slide_from, slide_end, lgap, distance;
2500 /* EY nr_mapping array variables */
2502 int nr_last_all_ones = 0;
2503 int nr_slide_from, nr_slide_end, nr_lgap, nr_distance;
2505 uint32_t old_cumack, old_base, old_highest;
2506 unsigned char aux_array[64];
2509 * EY! Don't think this is required but I am immitating the code for
2510 * map just to make sure
2512 unsigned char nr_aux_array[64];
/* Snapshot the pre-slide state so map logging below can show the delta. */
2517 old_cumack = asoc->cumulative_tsn;
2518 old_base = asoc->mapping_array_base_tsn;
2519 old_highest = asoc->highest_tsn_inside_map;
2520 if (asoc->mapping_array_size < 64)
2521 memcpy(aux_array, asoc->mapping_array,
2522 asoc->mapping_array_size);
2524 memcpy(aux_array, asoc->mapping_array, 64);
2525 /* EY do the same for nr_mapping_array */
2526 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2528 if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
2530 * printf("\nEY-IN sack_check method: \nEY-" "The
2531 * size of map and nr_map are inconsitent")
2534 if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) {
2536 * printf("\nEY-IN sack_check method VERY CRUCIAL
2537 * error: \nEY-" "The base tsns of map and nr_map
2541 /* EY! just immitating the above code */
2542 if (asoc->nr_mapping_array_size < 64)
2543 memcpy(nr_aux_array, asoc->nr_mapping_array,
2544 asoc->nr_mapping_array_size);
/* NOTE(review): this copies into aux_array, not nr_aux_array -- looks
 * like a copy/paste slip in the original; confirm against upstream. */
2546 memcpy(aux_array, asoc->nr_mapping_array, 64);
2549 * We could probably improve this a small bit by calculating the
2550 * offset of the current cum-ack as the starting point.
/* Walk the mapping array byte-wise; sctp_map_lookup_tab yields the
 * run of consecutively-received TSNs in a byte, locating the new
 * cumulative-TSN point. */
2553 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2555 if (asoc->mapping_array[slide_from] == 0xff) {
2559 /* there is a 0 bit */
2560 at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
2565 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2566 /* at is one off, since in the table a embedded -1 is present */
/* Sanity: cum-ack must never pass the highest TSN seen in the map. */
2569 if (compare_with_wrap(asoc->cumulative_tsn,
2570 asoc->highest_tsn_inside_map,
2573 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2574 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2576 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2577 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2579 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2581 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2582 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* Case 1: the whole map is acked -- wipe it and rebase past cum-ack. */
2585 if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2586 /* The complete array was completed by a single FR */
2587 /* higest becomes the cum-ack */
2590 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2591 /* clear the array */
2592 clr = (at >> 3) + 1;
2593 if (clr > asoc->mapping_array_size) {
2594 clr = asoc->mapping_array_size;
2596 memset(asoc->mapping_array, 0, clr);
2597 /* base becomes one ahead of the cum-ack */
2598 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2600 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2602 if (clr > asoc->nr_mapping_array_size)
2603 clr = asoc->nr_mapping_array_size;
2605 memset(asoc->nr_mapping_array, 0, clr);
2606 /* base becomes one ahead of the cum-ack */
2607 asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2608 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2610 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2611 sctp_log_map(old_base, old_cumack, old_highest,
2612 SCTP_MAP_PREPARE_SLIDE);
2613 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2614 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
/* Case 2: at least one whole byte is acked -- slide the map down. */
2616 } else if (at >= 8) {
2617 /* we can slide the mapping array down */
2618 /* slide_from holds where we hit the first NON 0xff byte */
2621 * now calculate the ceiling of the move using our highest
2624 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2625 lgap = asoc->highest_tsn_inside_map -
2626 asoc->mapping_array_base_tsn;
2628 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2629 asoc->highest_tsn_inside_map + 1;
2631 slide_end = lgap >> 3;
2632 if (slide_end < slide_from) {
2634 panic("impossible slide");
2636 printf("impossible slide?\n");
2640 if (slide_end > asoc->mapping_array_size) {
2642 panic("would overrun buffer");
2644 printf("Gak, would have overrun map end:%d slide_end:%d\n",
2645 asoc->mapping_array_size, slide_end);
2646 slide_end = asoc->mapping_array_size;
2649 distance = (slide_end - slide_from) + 1;
2650 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2651 sctp_log_map(old_base, old_cumack, old_highest,
2652 SCTP_MAP_PREPARE_SLIDE);
2653 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2654 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2656 if (distance + slide_from > asoc->mapping_array_size ||
2659 * Here we do NOT slide forward the array so that
2660 * hopefully when more data comes in to fill it up
2661 * we will be able to slide it forward. Really I
2662 * don't think this should happen :-0
2665 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2666 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2667 (uint32_t) asoc->mapping_array_size,
2668 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes to the front, zero the tail, advance the base
 * TSN by the number of bits slid out (slide_from bytes * 8). */
2673 for (ii = 0; ii < distance; ii++) {
2674 asoc->mapping_array[ii] =
2675 asoc->mapping_array[slide_from + ii];
2677 for (ii = distance; ii <= slide_end; ii++) {
2678 asoc->mapping_array[ii] = 0;
2680 asoc->mapping_array_base_tsn += (slide_from << 3);
2681 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2682 sctp_log_map(asoc->mapping_array_base_tsn,
2683 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2684 SCTP_MAP_SLIDE_RESULT);
2689 * EY if doing nr_sacks then slide the nr_mapping_array accordingly
/* Mirror of the clear/slide logic above, applied to the nr_mapping
 * array when the peer supports nr-sack. */
2692 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2695 for (nr_slide_from = 0; nr_slide_from < stcb->asoc.nr_mapping_array_size; nr_slide_from++) {
2697 if (asoc->nr_mapping_array[nr_slide_from] == 0xff) {
2699 nr_last_all_ones = 1;
2701 /* there is a 0 bit */
2702 nr_at += sctp_map_lookup_tab[asoc->nr_mapping_array[nr_slide_from]];
2703 nr_last_all_ones = 0;
2710 if (compare_with_wrap(asoc->cumulative_tsn,
2711 asoc->highest_tsn_inside_nr_map, MAX_TSN) && (at >= 8)) {
2712 /* The complete array was completed by a single FR */
2713 /* higest becomes the cum-ack */
2716 clr = (nr_at >> 3) + 1;
2718 if (clr > asoc->nr_mapping_array_size)
2719 clr = asoc->nr_mapping_array_size;
2721 memset(asoc->nr_mapping_array, 0, clr);
2722 /* base becomes one ahead of the cum-ack */
2723 asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2724 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2726 } else if (nr_at >= 8) {
2727 /* we can slide the mapping array down */
2728 /* Calculate the new byte postion we can move down */
2731 * now calculate the ceiling of the move using our
2734 if (asoc->highest_tsn_inside_nr_map >= asoc->nr_mapping_array_base_tsn) {
2735 nr_lgap = asoc->highest_tsn_inside_nr_map -
2736 asoc->nr_mapping_array_base_tsn;
2738 nr_lgap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) +
2739 asoc->highest_tsn_inside_nr_map + 1;
2741 nr_slide_end = nr_lgap >> 3;
2742 if (nr_slide_end < nr_slide_from) {
2744 panic("impossible slide");
2746 printf("impossible slide?\n");
2750 if (nr_slide_end > asoc->nr_mapping_array_size) {
2752 panic("would overrun buffer");
2754 printf("Gak, would have overrun map end:%d nr_slide_end:%d\n",
2755 asoc->nr_mapping_array_size, nr_slide_end);
2756 nr_slide_end = asoc->nr_mapping_array_size;
2759 nr_distance = (nr_slide_end - nr_slide_from) + 1;
2761 if (nr_distance + nr_slide_from > asoc->nr_mapping_array_size ||
2764 * Here we do NOT slide forward the array so
2765 * that hopefully when more data comes in to
2766 * fill it up we will be able to slide it
2767 * forward. Really I don't think this should
2774 for (ii = 0; ii < nr_distance; ii++) {
2775 asoc->nr_mapping_array[ii] =
2776 asoc->nr_mapping_array[nr_slide_from + ii];
2778 for (ii = nr_distance; ii <= nr_slide_end; ii++) {
2779 asoc->nr_mapping_array[ii] = 0;
2781 asoc->nr_mapping_array_base_tsn += (nr_slide_from << 3);
2786 * Now we need to see if we need to queue a sack or just start the
2787 * timer (if allowed).
/* SHUTDOWN-SENT: stop any delayed-ack timer and answer DATA with an
 * immediate SHUTDOWN plus a (NR-)SACK. */
2790 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2792 * Ok special case, in SHUTDOWN-SENT case. here we
2793 * maker sure SACK timer is off and instead send a
2794 * SHUTDOWN and a SACK
2796 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2797 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2798 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2800 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2802 * EY if nr_sacks used then send an nr-sack , a sack
2805 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
2806 sctp_send_nr_sack(stcb);
2808 sctp_send_sack(stcb);
2812 /* is there a gap now ? */
2813 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2814 stcb->asoc.cumulative_tsn, MAX_TSN);
2817 * CMT DAC algorithm: increase number of packets
2818 * received since last ack
2820 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Send a SACK now if any "ack immediately" condition holds. */
2822 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2824 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2826 (stcb->asoc.numduptsns) || /* we have dup's */
2827 (is_a_gap) || /* is still a gap */
2828 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2829 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2832 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2833 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2834 (stcb->asoc.send_sack == 0) &&
2835 (stcb->asoc.numduptsns == 0) &&
2836 (stcb->asoc.delayed_ack) &&
2837 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2840 * CMT DAC algorithm: With CMT,
2841 * delay acks even in the face of
2843 * reordering. Therefore, if acks that
2844 * do not have to be sent because of
2845 * the above reasons, will be
2846 * delayed. That is, acks that would
2847 * have been sent due to gap reports
2848 * will be delayed with DAC. Start
2849 * the delayed ack timer.
2851 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2852 stcb->sctp_ep, stcb, NULL);
2855 * Ok we must build a SACK since the
2856 * timer is pending, we got our
2857 * first packet OR there are gaps or
2860 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2862 * EY if nr_sacks used then send an
2863 * nr-sack , a sack otherwise
2865 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2866 sctp_send_nr_sack(stcb);
2868 sctp_send_sack(stcb);
/* Otherwise just arm the delayed-ack timer if it is not running. */
2871 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2872 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2873 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() - drive the reassembly queue: first continue
 * any partial-delivery (PD-API) already in progress; otherwise, if the
 * first fragment of the next deliverable message is at the head of the
 * reassembly queue (in stream-sequence order, or unordered), start a
 * new partial delivery -- but only once either the whole message is
 * present or at least partial_delivery_point bytes have arrived.
 *
 * NOTE(review): lossy extraction -- lines carry stray leading numbers
 * and several lines (braces, locals such as 'tsize'/'nxt_todel', the
 * function tail) are missing.  Only comments were added.
 */
2881 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2883 struct sctp_tmit_chunk *chk;
2887 if (asoc->fragmented_delivery_inprogress) {
2888 sctp_service_reassembly(stcb, asoc);
2890 /* Can we proceed further, i.e. the PD-API is complete */
2891 if (asoc->fragmented_delivery_inprogress) {
2896 * Now is there some other chunk I can deliver from the reassembly
2900 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Queue is empty: reset the reassembly accounting to a known state. */
2902 asoc->size_on_reasm_queue = 0;
2903 asoc->cnt_on_reasm_queue = 0;
2906 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2907 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2908 ((nxt_todel == chk->rec.data.stream_seq) ||
2909 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2911 * Yep the first one is here. We setup to start reception,
2912 * by backing down the TSN just in case we can't deliver.
2916 * Before we start though either all of the message should
2917 * be here or 1/4 the socket buffer max or nothing on the
2918 * delivery queue and something can be delivered.
2920 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2921 (tsize >= stcb->sctp_ep->partial_delivery_point))) {
/* Record which stream/ssn/ppid the PD-API is delivering so later
 * fragments can be matched to it. */
2922 asoc->fragmented_delivery_inprogress = 1;
2923 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2924 asoc->str_of_pdapi = chk->rec.data.stream_number;
2925 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2926 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2927 asoc->fragment_flags = chk->rec.data.rcv_flags;
2928 sctp_service_reassembly(stcb, asoc);
2929 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() - walk every chunk in the DATA region of a
 * received packet: validate chunk lengths, pass SCTP_DATA chunks to
 * sctp_process_a_data_chunk(), abort the association on malformed
 * DATA, ignore known control chunk types that appear out of place
 * (or abort if sctp_strict_data_order is set), and apply the
 * high-order-bit rules of the chunk type to unknown chunks
 * (0x40: queue an UNRECOG_CHUNK error report; 0x80 clear: stop
 * processing the packet).  Afterwards it services the reassembly
 * queue and runs the SACK decision logic.
 *
 * NOTE(review): lossy extraction -- stray leading line numbers and
 * missing lines (braces, locals such as 'm'/'stop_proc'/'ippp',
 * #ifdef partners).  Only comments were added.
 */
2937 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2938 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2939 struct sctp_nets *net, uint32_t * high_tsn)
2941 struct sctp_data_chunk *ch, chunk_buf;
2942 struct sctp_association *asoc;
2943 int num_chunks = 0; /* number of control chunks processed */
2945 int chk_length, break_flag, last_chunk;
2946 int abort_flag = 0, was_a_gap = 0;
2950 sctp_set_rwnd(stcb, &stcb->asoc);
2953 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap existed before this packet; sctp_sack_check
 * uses it to decide on an immediate SACK when the gap closes. */
2955 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2956 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2957 /* there was a gap before this data was processed */
2961 * setup where we got the last DATA packet from for any SACK that
2962 * may need to go out. Don't bump the net. This is done ONLY when a
2963 * chunk is assigned.
2965 asoc->last_data_chunk_from = net;
2968 * Now before we proceed we must figure out if this is a wasted
2969 * cluster... i.e. it is a small packet sent in and yet the driver
2970 * underneath allocated a full cluster for it. If so we must copy it
2971 * to a smaller mbuf and free up the cluster mbuf. This will help
2972 * with cluster starvation. Note for __Panda__ we don't do this
2973 * since it has clusters all the way down to 64 bytes.
2975 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2976 /* we only handle mbufs that are singletons.. not chains */
2977 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2979 /* ok lets see if we can copy the data up */
2982 /* get the pointers and copy */
2983 to = mtod(m, caddr_t *);
2984 from = mtod((*mm), caddr_t *);
2985 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2986 /* copy the length and free up the old */
2987 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2989 /* sucess, back copy */
2992 /* We are in trouble in the mbuf world .. yikes */
2996 /* get pointer to the first chunk header */
2997 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2998 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3003 * process all DATA chunks...
3005 *high_tsn = asoc->cumulative_tsn;
3007 asoc->data_pkts_seen++;
3008 while (stop_proc == 0) {
3009 /* validate chunk length */
3010 chk_length = ntohs(ch->ch.chunk_length);
3011 if (length - *offset < chk_length) {
3012 /* all done, mutulated chunk */
3016 if (ch->ch.chunk_type == SCTP_DATA) {
/* A DATA chunk must carry at least one byte of payload beyond its
 * header; otherwise abort with PROTOCOL_VIOLATION. */
3017 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
3019 * Need to send an abort since we had a
3020 * invalid data chunk.
3022 struct mbuf *op_err;
3024 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
3025 0, M_DONTWAIT, 1, MT_DATA);
3028 struct sctp_paramhdr *ph;
3031 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
3032 (2 * sizeof(uint32_t));
3033 ph = mtod(op_err, struct sctp_paramhdr *);
3035 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3036 ph->param_length = htons(SCTP_BUF_LEN(op_err));
3037 ippp = (uint32_t *) (ph + 1);
3038 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
3040 *ippp = asoc->cumulative_tsn;
3043 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
3044 sctp_abort_association(inp, stcb, m, iphlen, sh,
3045 op_err, 0, net->port);
3048 #ifdef SCTP_AUDITING_ENABLED
3049 sctp_audit_log(0xB1, 0);
3051 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
3056 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
3057 chk_length, net, high_tsn, &abort_flag, &break_flag,
3066 * Set because of out of rwnd space and no
3067 * drop rep space left.
3073 /* not a data chunk in the data region */
3074 switch (ch->ch.chunk_type) {
3075 case SCTP_INITIATION:
3076 case SCTP_INITIATION_ACK:
3077 case SCTP_SELECTIVE_ACK:
3078 case SCTP_NR_SELECTIVE_ACK: /* EY */
3079 case SCTP_HEARTBEAT_REQUEST:
3080 case SCTP_HEARTBEAT_ACK:
3081 case SCTP_ABORT_ASSOCIATION:
3083 case SCTP_SHUTDOWN_ACK:
3084 case SCTP_OPERATION_ERROR:
3085 case SCTP_COOKIE_ECHO:
3086 case SCTP_COOKIE_ACK:
3089 case SCTP_SHUTDOWN_COMPLETE:
3090 case SCTP_AUTHENTICATION:
3091 case SCTP_ASCONF_ACK:
3092 case SCTP_PACKET_DROPPED:
3093 case SCTP_STREAM_RESET:
3094 case SCTP_FORWARD_CUM_TSN:
3097 * Now, what do we do with KNOWN chunks that
3098 * are NOT in the right place?
3100 * For now, I do nothing but ignore them. We
3101 * may later want to add sysctl stuff to
3102 * switch out and do either an ABORT() or
3103 * possibly process them.
3105 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
3106 struct mbuf *op_err;
3108 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
3109 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
3114 /* unknown chunk type, use bit rules */
3115 if (ch->ch.chunk_type & 0x40) {
3116 /* Add a error report to the queue */
3118 struct sctp_paramhdr *phd;
3120 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
3122 phd = mtod(merr, struct sctp_paramhdr *);
3124 * We cheat and use param
3125 * type since we did not
3126 * bother to define a error
3127 * cause struct. They are
3128 * the same basic format
3129 * with different names.
3132 htons(SCTP_CAUSE_UNRECOG_CHUNK);
3134 htons(chk_length + sizeof(*phd));
3135 SCTP_BUF_LEN(merr) = sizeof(*phd);
3136 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
3137 SCTP_SIZE32(chk_length),
3139 if (SCTP_BUF_NEXT(merr)) {
3140 sctp_queue_op_err(stcb, merr);
3146 if ((ch->ch.chunk_type & 0x80) == 0) {
3147 /* discard the rest of this packet */
3149 } /* else skip this bad chunk and
3152 }; /* switch of chunk type */
/* Advance to the next chunk (32-bit padded) or finish the packet. */
3154 *offset += SCTP_SIZE32(chk_length);
3155 if ((*offset >= length) || stop_proc) {
3156 /* no more data left in the mbuf chain */
3160 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
3161 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3171 * we need to report rwnd overrun drops.
3173 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
3177 * Did we get data, if so update the time for auto-close and
3178 * give peer credit for being alive.
3180 SCTP_STAT_INCR(sctps_recvpktwithdata);
3181 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3182 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3183 stcb->asoc.overall_error_count,
3185 SCTP_FROM_SCTP_INDATA,
3188 stcb->asoc.overall_error_count = 0;
3189 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3191 /* now service all of the reassm queue if needed */
3192 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
3193 sctp_service_queues(stcb, asoc);
3195 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
3196 /* Assure that we ack right away */
3197 stcb->asoc.send_sack = 1;
3199 /* Start a sack timer or QUEUE a SACK for sending */
/* Fast path: no gap in the map at all -- either SACK at once (freq /
 * delayed-ack-off / dups / forced) or just start the RECV timer. */
3200 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
3201 (stcb->asoc.mapping_array[0] != 0xff)) {
3202 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
3203 (stcb->asoc.delayed_ack == 0) ||
3204 (stcb->asoc.numduptsns) ||
3205 (stcb->asoc.send_sack == 1)) {
3206 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3207 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
3210 * EY if nr_sacks used then send an nr-sack , a sack
3213 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
3214 sctp_send_nr_sack(stcb);
3216 sctp_send_sack(stcb);
3218 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3219 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
3220 stcb->sctp_ep, stcb, NULL);
3224 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
/*
 * sctp_handle_segments() - process the Gap Ack Blocks of an incoming
 * SACK.  For each fragment range [frag_strt, frag_end] (offsets from
 * last_tsn), walk the sent queue and mark covered chunks ACKED/MARKED,
 * maintaining: CMT CUCv2 pseudo-cumack and rtx-pseudo-cumack trackers,
 * per-net newack bookkeeping, flight-size/total-flight decreases, RTO
 * sampling on first transmissions, ECN nonce sums, retransmit counts,
 * and the out-params biggest_tsn_acked / biggest_newly_acked_tsn /
 * this_sack_lowest_newack.
 *
 * NOTE(review): lossy extraction -- stray leading line numbers and
 * missing lines (braces, locals such as 'i'/'j'/'num_frs', several
 * elided comment bodies).  Only comments were added.
 */
3233 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3234 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3235 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3236 int num_seg, int *ecn_seg_sums)
3238 /************************************************/
3239 /* process fragments and update sendqueue */
3240 /************************************************/
3241 struct sctp_sack *sack;
3242 struct sctp_gap_ack_block *frag, block;
3243 struct sctp_tmit_chunk *tp1;
3245 unsigned int theTSN;
3248 uint16_t frag_strt, frag_end, primary_flag_set;
3249 u_long last_frag_high;
3252 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
3254 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3255 primary_flag_set = 1;
3257 primary_flag_set = 0;
/* Pull the first gap-ack block out of the mbuf chain. */
3261 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3262 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3263 *offset += sizeof(block);
3269 for (i = 0; i < num_seg; i++) {
3270 frag_strt = ntohs(frag->start);
3271 frag_end = ntohs(frag->end);
3272 /* some sanity checks on the fragment offsets */
3273 if (frag_strt > frag_end) {
3274 /* this one is malformed, skip */
3278 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3280 *biggest_tsn_acked = frag_end + last_tsn;
3282 /* mark acked dgs and find out the highestTSN being acked */
3284 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3286 /* save the locations of the last frags */
3287 last_frag_high = frag_end + last_tsn;
3290 * now lets see if we need to reset the queue due to
3291 * a out-of-order SACK fragment
3293 if (compare_with_wrap(frag_strt + last_tsn,
3294 last_frag_high, MAX_TSN)) {
3296 * if the new frag starts after the last TSN
3297 * frag covered, we are ok and this one is
3298 * beyond the last one
3303 * ok, they have reset us, so we need to
3304 * reset the queue this will cause extra
3305 * hunting but hey, they chose the
3306 * performance hit when they failed to order
3309 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3311 last_frag_high = frag_end + last_tsn;
/* Inner loop: every TSN covered by this fragment. */
3313 for (j = frag_strt; j <= frag_end; j++) {
3314 theTSN = j + last_tsn;
3316 if (tp1->rec.data.doing_fast_retransmit)
3320 * CMT: CUCv2 algorithm. For each TSN being
3321 * processed from the sent queue, track the
3322 * next expected pseudo-cumack, or
3323 * rtx_pseudo_cumack, if required. Separate
3324 * cumack trackers for first transmissions,
3325 * and retransmissions.
3327 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3328 (tp1->snd_count == 1)) {
3329 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
3330 tp1->whoTo->find_pseudo_cumack = 0;
3332 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3333 (tp1->snd_count > 1)) {
3334 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
3335 tp1->whoTo->find_rtx_pseudo_cumack = 0;
3337 if (tp1->rec.data.TSN_seq == theTSN) {
3338 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3340 * must be held until
3344 * ECN Nonce: Add the nonce
3345 * value to the sender's
3348 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3350 * If it is less than RESEND, it is
3351 * now no-longer in flight.
3352 * Higher values may already be set
3353 * via previous Gap Ack Blocks...
3354 * i.e. ACKED or RESEND.
3356 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3357 *biggest_newly_acked_tsn, MAX_TSN)) {
3358 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
3367 * this_sack_highest_
3371 if (tp1->rec.data.chunk_was_revoked == 0)
3372 tp1->whoTo->saw_newack = 1;
3374 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3375 tp1->whoTo->this_sack_highest_newack,
3377 tp1->whoTo->this_sack_highest_newack =
3378 tp1->rec.data.TSN_seq;
3383 * this_sack_lowest_n
3386 if (*this_sack_lowest_newack == 0) {
3387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3388 sctp_log_sack(*this_sack_lowest_newack,
3390 tp1->rec.data.TSN_seq,
3393 SCTP_LOG_TSN_ACKED);
3395 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
3400 * (rtx-)pseudo-cumac
3405 * (rtx-)pseudo-cumac
3407 * new_(rtx_)pseudo_c
3415 * (rtx-)pseudo-cumac
/* Advance the CMT pseudo-cumack trackers when their expected TSN is
 * gap-acked (unless the chunk was revoked). */
3423 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3424 if (tp1->rec.data.chunk_was_revoked == 0) {
3425 tp1->whoTo->new_pseudo_cumack = 1;
3427 tp1->whoTo->find_pseudo_cumack = 1;
3429 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3430 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3432 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3433 if (tp1->rec.data.chunk_was_revoked == 0) {
3434 tp1->whoTo->new_pseudo_cumack = 1;
3436 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3438 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3439 sctp_log_sack(*biggest_newly_acked_tsn,
3441 tp1->rec.data.TSN_seq,
3444 SCTP_LOG_TSN_ACKED);
3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3447 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3448 tp1->whoTo->flight_size,
3450 (uintptr_t) tp1->whoTo,
3451 tp1->rec.data.TSN_seq);
3453 sctp_flight_size_decrease(tp1);
3454 sctp_total_flight_decrease(stcb, tp1);
3456 tp1->whoTo->net_ack += tp1->send_size;
/* First transmission acked: also credit net_ack2 and take an RTT
 * sample from this chunk's send time. */
3457 if (tp1->snd_count < 2) {
3463 tp1->whoTo->net_ack2 += tp1->send_size;
3470 sctp_calculate_rto(stcb,
3473 &tp1->sent_rcv_time,
3474 sctp_align_safe_nocopy);
3479 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3480 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3481 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3482 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3483 asoc->this_sack_highest_gap,
3485 asoc->this_sack_highest_gap =
3486 tp1->rec.data.TSN_seq;
3488 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3489 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3490 #ifdef SCTP_AUDITING_ENABLED
3491 sctp_audit_log(0xB2,
3492 (asoc->sent_queue_retran_cnt & 0x000000ff));
3497 * All chunks NOT UNSENT
3498 * fall through here and are
3501 tp1->sent = SCTP_DATAGRAM_MARKED;
3502 if (tp1->rec.data.chunk_was_revoked) {
3503 /* deflate the cwnd */
3504 tp1->whoTo->cwnd -= tp1->book_size;
3505 tp1->rec.data.chunk_was_revoked = 0;
3509 } /* if (tp1->TSN_seq == theTSN) */
3510 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3514 tp1 = TAILQ_NEXT(tp1, sctp_next);
3515 } /* end while (tp1) */
3516 } /* end for (j = fragStart */
/* Fetch the next gap-ack block for the outer loop. */
3517 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3518 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3519 *offset += sizeof(block);
3524 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3526 sctp_log_fr(*biggest_tsn_acked,
3527 *biggest_newly_acked_tsn,
3528 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
/*
 * sctp_check_for_revoked() - after a SACK is processed, rescan the
 * sent queue above the cum-ack: a chunk previously ACKED but not
 * covered by this SACK's gap reports has been "revoked" -- return it
 * to SENT, put it back in flight (inflating cwnd to compensate); a
 * chunk MARKED during this SACK becomes ACKED.  If anything was
 * revoked, re-establish the ECN nonce re-sync point.
 *
 * NOTE(review): lossy extraction -- stray leading line numbers and
 * missing lines (braces, loop structure).  Only comments were added.
 */
3533 sctp_check_for_revoked(struct sctp_tcb *stcb,
3534 struct sctp_association *asoc, uint32_t cumack,
3535 u_long biggest_tsn_acked)
3537 struct sctp_tmit_chunk *tp1;
3538 int tot_revoked = 0;
3540 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3542 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3545 * ok this guy is either ACK or MARKED. If it is
3546 * ACKED it has been previously acked but not this
3547 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Chunks above the biggest TSN acked by this SACK are out of scope. */
3550 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3555 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3556 /* it has been revoked */
3557 tp1->sent = SCTP_DATAGRAM_SENT;
3558 tp1->rec.data.chunk_was_revoked = 1;
3560 * We must add this stuff back in to assure
3561 * timers and such get started.
3563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3564 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3565 tp1->whoTo->flight_size,
3567 (uintptr_t) tp1->whoTo,
3568 tp1->rec.data.TSN_seq);
3570 sctp_flight_size_increase(tp1);
3571 sctp_total_flight_increase(stcb, tp1);
3573 * We inflate the cwnd to compensate for our
3574 * artificial inflation of the flight_size.
3576 tp1->whoTo->cwnd += tp1->book_size;
3578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3579 sctp_log_sack(asoc->last_acked_seq,
3581 tp1->rec.data.TSN_seq,
3584 SCTP_LOG_TSN_REVOKED);
3586 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3587 /* it has been re-acked in this SACK */
3588 tp1->sent = SCTP_DATAGRAM_ACKED;
3591 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3593 tp1 = TAILQ_NEXT(tp1, sctp_next);
3595 if (tot_revoked > 0) {
3597 * Setup the ecn nonce re-sync point. We do this since once
3598 * data is revoked we begin to retransmit things, which do
3599 * NOT have the ECN bits set. This means we are now out of
3600 * sync and must wait until we get back in sync with the
3601 * peer to check ECN bits.
3603 tp1 = TAILQ_FIRST(&asoc->send_queue);
3605 asoc->nonce_resync_tsn = asoc->sending_seq;
3607 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3609 asoc->nonce_wait_for_ecne = 0;
3610 asoc->nonce_sum_check = 0;
/*
 * sctp_strike_gap_ack_chunks() - after SACK gap processing, walk the
 * sent_queue and "strike" (increment miss-indications on) chunks that
 * were not covered by the SACK.  Chunks that reach the strike threshold
 * are moved to SCTP_DATAGRAM_RESEND state and an alternate destination
 * is chosen for the fast retransmission.  Also applies the CMT SFR/DAC/
 * HTNA rules and PR-SCTP TTL/RTX expiry while scanning.
 *
 * NOTE(review): interior lines of this extract are elided (leading
 * numbers are original file line numbers); several if-headers, braces
 * and the strike-count increments themselves are not visible here.
 */
3616 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3617 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3619 struct sctp_tmit_chunk *tp1;
3620 int strike_flag = 0;
3622 int tot_retrans = 0;
3623 uint32_t sending_seq;
3624 struct sctp_nets *net;
3625 int num_dests_sacked = 0;
3628 * select the sending_seq, this is either the next thing ready to be
3629 * sent but not transmitted, OR, the next seq we assign.
3631 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3633 sending_seq = asoc->sending_seq;
3635 sending_seq = tp1->rec.data.TSN_seq;
3638 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3639 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3640 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3641 if (net->saw_newack)
3645 if (stcb->asoc.peer_supports_prsctp) {
3646 (void)SCTP_GETTIME_TIMEVAL(&now);
/* Main scan over everything still on the sent queue. */
3648 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3651 if (tp1->no_fr_allowed) {
3652 /* this one had a timeout or something */
3653 tp1 = TAILQ_NEXT(tp1, sctp_next);
3656 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3657 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3658 sctp_log_fr(biggest_tsn_newly_acked,
3659 tp1->rec.data.TSN_seq,
3661 SCTP_FR_LOG_CHECK_STRIKE);
3663 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3665 tp1->sent == SCTP_DATAGRAM_UNSENT) {
/* PR-SCTP: drop chunks whose TTL has expired ... */
3669 if (stcb->asoc.peer_supports_prsctp) {
3670 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3671 /* Is it expired? */
3674 * TODO sctp_constants.h needs alternative
3675 * time macros when _KERNEL is undefined.
3677 (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3679 /* Yes so drop it */
3680 if (tp1->data != NULL) {
3681 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3682 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3683 &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3685 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* ... or whose retransmission-count limit has been reached. */
3689 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3690 /* Has it been retransmitted tv_sec times? */
3691 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3692 /* Yes, so drop it */
3693 if (tp1->data != NULL) {
3694 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3695 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3696 &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3698 tp1 = TAILQ_NEXT(tp1, sctp_next);
3703 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3704 asoc->this_sack_highest_gap, MAX_TSN)) {
3705 /* we are beyond the tsn in the sack */
3708 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3709 /* either a RESEND, ACKED, or MARKED */
3711 tp1 = TAILQ_NEXT(tp1, sctp_next);
3715 * CMT : SFR algo (covers part of DAC and HTNA as well)
3717 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3719 * No new acks were receieved for data sent to this
3720 * dest. Therefore, according to the SFR algo for
3721 * CMT, no data sent to this dest can be marked for
3722 * FR using this SACK.
3724 tp1 = TAILQ_NEXT(tp1, sctp_next);
3726 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3727 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3729 * CMT: New acks were receieved for data sent to
3730 * this dest. But no new acks were seen for data
3731 * sent after tp1. Therefore, according to the SFR
3732 * algo for CMT, tp1 cannot be marked for FR using
3733 * this SACK. This step covers part of the DAC algo
3734 * and the HTNA algo as well.
3736 tp1 = TAILQ_NEXT(tp1, sctp_next);
3740 * Here we check to see if we were have already done a FR
3741 * and if so we see if the biggest TSN we saw in the sack is
3742 * smaller than the recovery point. If so we don't strike
3743 * the tsn... otherwise we CAN strike the TSN.
3746 * @@@ JRI: Check for CMT if (accum_moved &&
3747 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3750 if (accum_moved && asoc->fast_retran_loss_recovery) {
3752 * Strike the TSN if in fast-recovery and cum-ack
3755 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3756 sctp_log_fr(biggest_tsn_newly_acked,
3757 tp1->rec.data.TSN_seq,
3759 SCTP_FR_LOG_STRIKE_CHUNK);
3761 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3764 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3766 * CMT DAC algorithm: If SACK flag is set to
3767 * 0, then lowest_newack test will not pass
3768 * because it would have been set to the
3769 * cumack earlier. If not already to be
3770 * rtx'd, If not a mixed sack and if tp1 is
3771 * not between two sacked TSNs, then mark by
3772 * one more. NOTE that we are marking by one
3773 * additional time since the SACK DAC flag
3774 * indicates that two packets have been
3775 * received after this missing TSN.
3777 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3778 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3779 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3780 sctp_log_fr(16 + num_dests_sacked,
3781 tp1->rec.data.TSN_seq,
3783 SCTP_FR_LOG_STRIKE_CHUNK);
3788 } else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3790 * For those that have done a FR we must take
3791 * special consideration if we strike. I.e the
3792 * biggest_newly_acked must be higher than the
3793 * sending_seq at the time we did the FR.
3796 #ifdef SCTP_FR_TO_ALTERNATE
3798 * If FR's go to new networks, then we must only do
3799 * this for singly homed asoc's. However if the FR's
3800 * go to the same network (Armando's work) then its
3801 * ok to FR multiple times.
3809 if ((compare_with_wrap(biggest_tsn_newly_acked,
3810 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3811 (biggest_tsn_newly_acked ==
3812 tp1->rec.data.fast_retran_tsn)) {
3814 * Strike the TSN, since this ack is
3815 * beyond where things were when we
3818 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3819 sctp_log_fr(biggest_tsn_newly_acked,
3820 tp1->rec.data.TSN_seq,
3822 SCTP_FR_LOG_STRIKE_CHUNK);
3824 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3828 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3830 * CMT DAC algorithm: If
3831 * SACK flag is set to 0,
3832 * then lowest_newack test
3833 * will not pass because it
3834 * would have been set to
3835 * the cumack earlier. If
3836 * not already to be rtx'd,
3837 * If not a mixed sack and
3838 * if tp1 is not between two
3839 * sacked TSNs, then mark by
3840 * one more. NOTE that we
3841 * are marking by one
3842 * additional time since the
3843 * SACK DAC flag indicates
3844 * that two packets have
3845 * been received after this
3848 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3849 (num_dests_sacked == 1) &&
3850 compare_with_wrap(this_sack_lowest_newack,
3851 tp1->rec.data.TSN_seq, MAX_TSN)) {
3852 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3853 sctp_log_fr(32 + num_dests_sacked,
3854 tp1->rec.data.TSN_seq,
3856 SCTP_FR_LOG_STRIKE_CHUNK);
3858 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3866 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3869 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3870 biggest_tsn_newly_acked, MAX_TSN)) {
3872 * We don't strike these: This is the HTNA
3873 * algorithm i.e. we don't strike If our TSN is
3874 * larger than the Highest TSN Newly Acked.
3878 /* Strike the TSN */
3879 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3880 sctp_log_fr(biggest_tsn_newly_acked,
3881 tp1->rec.data.TSN_seq,
3883 SCTP_FR_LOG_STRIKE_CHUNK);
3885 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3888 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3890 * CMT DAC algorithm: If SACK flag is set to
3891 * 0, then lowest_newack test will not pass
3892 * because it would have been set to the
3893 * cumack earlier. If not already to be
3894 * rtx'd, If not a mixed sack and if tp1 is
3895 * not between two sacked TSNs, then mark by
3896 * one more. NOTE that we are marking by one
3897 * additional time since the SACK DAC flag
3898 * indicates that two packets have been
3899 * received after this missing TSN.
3901 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3902 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3903 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3904 sctp_log_fr(48 + num_dests_sacked,
3905 tp1->rec.data.TSN_seq,
3907 SCTP_FR_LOG_STRIKE_CHUNK);
/*
 * Chunk has reached the strike threshold: queue it for fast
 * retransmission and pick the destination for the resend.
 */
3913 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3914 /* Increment the count to resend */
3915 struct sctp_nets *alt;
3917 /* printf("OK, we are now ready to FR this guy\n"); */
3918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3919 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3923 /* This is a subsequent FR */
3924 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3926 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3927 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3929 * CMT: Using RTX_SSTHRESH policy for CMT.
3930 * If CMT is being used, then pick dest with
3931 * largest ssthresh for any retransmission.
3933 tp1->no_fr_allowed = 1;
3935 /* sa_ignore NO_NULL_CHK */
3936 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3938 * JRS 5/18/07 - If CMT PF is on,
3939 * use the PF version of
3942 alt = sctp_find_alternate_net(stcb, alt, 2);
3945 * JRS 5/18/07 - If only CMT is on,
3946 * use the CMT version of
3949 /* sa_ignore NO_NULL_CHK */
3950 alt = sctp_find_alternate_net(stcb, alt, 1);
3956 * CUCv2: If a different dest is picked for
3957 * the retransmission, then new
3958 * (rtx-)pseudo_cumack needs to be tracked
3959 * for orig dest. Let CUCv2 track new (rtx-)
3960 * pseudo-cumack always.
3963 tp1->whoTo->find_pseudo_cumack = 1;
3964 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3966 } else {/* CMT is OFF */
3968 #ifdef SCTP_FR_TO_ALTERNATE
3969 /* Can we find an alternate? */
3970 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3973 * default behavior is to NOT retransmit
3974 * FR's to an alternate. Armando Caro's
3975 * paper details why.
3981 tp1->rec.data.doing_fast_retransmit = 1;
3983 /* mark the sending seq for possible subsequent FR's */
3985 * printf("Marking TSN for FR new value %x\n",
3986 * (uint32_t)tpi->rec.data.TSN_seq);
3988 if (TAILQ_EMPTY(&asoc->send_queue)) {
3990 * If the queue of send is empty then its
3991 * the next sequence number that will be
3992 * assigned so we subtract one from this to
3993 * get the one we last sent.
3995 tp1->rec.data.fast_retran_tsn = sending_seq;
3998 * If there are chunks on the send queue
3999 * (unsent data that has made it from the
4000 * stream queues but not out the door, we
4001 * take the first one (which will have the
4002 * lowest TSN) and subtract one to get the
4005 struct sctp_tmit_chunk *ttt;
4007 ttt = TAILQ_FIRST(&asoc->send_queue);
4008 tp1->rec.data.fast_retran_tsn =
4009 ttt->rec.data.TSN_seq;
4014 * this guy had a RTO calculation pending on
4019 /* fix counts and things */
4020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4021 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
4022 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
4024 (uintptr_t) tp1->whoTo,
4025 tp1->rec.data.TSN_seq);
4028 tp1->whoTo->net_ack++;
4029 sctp_flight_size_decrease(tp1);
4031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4032 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
4033 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
4035 /* add back to the rwnd */
4036 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
4038 /* remove from the total flight */
4039 sctp_total_flight_decrease(stcb, tp1);
4040 if (alt != tp1->whoTo) {
4041 /* yes, there is an alternate. */
/* Swap destination refcounts: drop old whoTo, take a ref on alt. */
4042 sctp_free_remote_addr(tp1->whoTo);
4043 /* sa_ignore FREED_MEMORY */
4045 atomic_add_int(&alt->ref_count, 1);
4048 tp1 = TAILQ_NEXT(tp1, sctp_next);
/*
 * If anything was queued for retransmit, reset the ECN-nonce
 * re-sync point (Karn's rule makes the nonce totals unknowable).
 */
4051 if (tot_retrans > 0) {
4053 * Setup the ecn nonce re-sync point. We do this since once
4054 * we go to FR something we introduce a Karn's rule scenario
4055 * and won't know the totals for the ECN bits.
4057 asoc->nonce_resync_tsn = sending_seq;
4058 asoc->nonce_wait_for_ecne = 0;
4059 asoc->nonce_sum_check = 0;
/*
 * sctp_try_advance_peer_ack_point() - PR-SCTP: try to move the
 * advanced_peer_ack_point forward over chunks that have been abandoned
 * (FORWARD_TSN_SKIP) or whose PR-SCTP lifetime has expired.  Returns the
 * last chunk we advanced over (a_adv), or NULL-ish when no advance is
 * possible — the caller uses it to decide whether a FWD-TSN must be sent.
 *
 * NOTE(review): the function's trailing lines (final return) are elided
 * from this extract; interior braces are also missing.
 */
4063 struct sctp_tmit_chunk *
4064 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
4065 struct sctp_association *asoc)
4067 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
4071 if (asoc->peer_supports_prsctp == 0) {
4074 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4076 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
4077 tp1->sent != SCTP_DATAGRAM_RESEND) {
4078 /* no chance to advance, out of here */
4081 if (!PR_SCTP_ENABLED(tp1->flags)) {
4083 * We can't fwd-tsn past any that are reliable aka
4084 * retransmitted until the asoc fails.
4089 (void)SCTP_GETTIME_TIMEVAL(&now);
/* Remember the next entry before tp1 may be released below. */
4092 tp2 = TAILQ_NEXT(tp1, sctp_next);
4094 * now we got a chunk which is marked for another
4095 * retransmission to a PR-stream but has run out its chances
4096 * already maybe OR has been marked to skip now. Can we skip
4097 * it if its a resend?
4099 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
4100 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
4102 * Now is this one marked for resend and its time is
4105 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
4106 /* Yes so drop it */
4108 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
4109 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
4110 &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
4114 * No, we are done when hit one for resend
4115 * whos time as not expired.
4121 * Ok now if this chunk is marked to drop it we can clean up
4122 * the chunk, advance our peer ack point and we can check
4125 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4126 /* advance PeerAckPoint goes forward */
4127 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
4131 * If it is still in RESEND we can advance no
4137 * If we hit here we just dumped tp1, move to next tsn on
/*
 * sctp_fs_audit() - sanity-audit the flight-size bookkeeping: classify
 * every chunk on the sent_queue by its 'sent' state and complain (panic
 * in invariants builds, printf otherwise) if any chunk is still counted
 * as in flight or in an in-between state when the caller believed the
 * flight to be empty.
 *
 * NOTE(review): the count increments and the enclosing #ifdef around
 * panic() are elided from this extract.
 */
4146 sctp_fs_audit(struct sctp_association *asoc)
4148 struct sctp_tmit_chunk *chk;
4149 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4151 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4152 if (chk->sent < SCTP_DATAGRAM_RESEND) {
4154 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4156 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4158 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
4165 if ((inflight > 0) || (inbetween > 0)) {
4167 panic("Flight size-express incorrect? \n");
4169 SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n",
4170 inflight, inbetween);
/*
 * sctp_window_probe_recovery() - the peer's receive window re-opened
 * after a zero-window probe: move the probe chunk (tp1) from the
 * sent_queue back onto the head of the send_queue (UNSENT), deflating
 * flight accounting, then move every RESEND-marked chunk after it as
 * well so they are re-sent in order.
 */
4177 sctp_window_probe_recovery(struct sctp_tcb *stcb,
4178 struct sctp_association *asoc,
4179 struct sctp_nets *net,
4180 struct sctp_tmit_chunk *tp1)
4182 struct sctp_tmit_chunk *chk;
4184 /* First setup this one and get it moved back */
4185 tp1->sent = SCTP_DATAGRAM_UNSENT;
4186 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4187 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4188 tp1->whoTo->flight_size,
4190 (uintptr_t) tp1->whoTo,
4191 tp1->rec.data.TSN_seq);
4193 sctp_flight_size_decrease(tp1);
4194 sctp_total_flight_decrease(stcb, tp1);
4195 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4196 TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
4197 asoc->sent_queue_cnt--;
4198 asoc->send_queue_cnt++;
4200 * Now all guys marked for RESEND on the sent_queue must be moved
/*
 * NOTE(review): removing 'chk' from sent_queue inside TAILQ_FOREACH
 * invalidates the iterator; later FreeBSD revisions use
 * TAILQ_FOREACH_SAFE here — verify against the full source before
 * relying on this loop.
 */
4203 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4204 if (chk->sent == SCTP_DATAGRAM_RESEND) {
4205 /* Another chunk to move */
4206 chk->sent = SCTP_DATAGRAM_UNSENT;
4207 /* It should not be in flight */
4208 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
4209 TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next);
4210 asoc->sent_queue_cnt--;
4211 asoc->send_queue_cnt++;
4212 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
/*
 * sctp_express_handle_sack() - fast path for a SACK that carries only a
 * cumulative ack (no gap-ack blocks, no duplicates).  Frees everything on
 * the sent_queue covered by 'cumack', updates RTO/cwnd/rwnd and the ECN
 * nonce state, wakes the sending socket, restarts or stops T3 timers per
 * destination, handles window-probe recovery, and finally deals with
 * shutdown progression and PR-SCTP FWD-TSN generation.
 *
 * NOTE(review): this extract has many interior lines elided (leading
 * numbers are original file line numbers) — several declarations
 * (old_rwnd, send_s, oper, so, to_ticks), braces and #else/#endif lines
 * are not visible here.
 */
4218 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4219 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4221 struct sctp_nets *net;
4222 struct sctp_association *asoc;
4223 struct sctp_tmit_chunk *tp1, *tp2;
4225 int win_probe_recovery = 0;
4226 int win_probe_recovered = 0;
4227 int j, done_once = 0;
4229 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4230 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4231 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4233 SCTP_TCB_LOCK_ASSERT(stcb);
4234 #ifdef SCTP_ASOCLOG_OF_TSNS
4235 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4236 stcb->asoc.cumack_log_at++;
4237 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4238 stcb->asoc.cumack_log_at = 0;
4242 old_rwnd = asoc->peers_rwnd;
/* Old (already-seen) cum-ack: nothing to do but maybe update rwnd. */
4243 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4246 } else if (asoc->last_acked_seq == cumack) {
4247 /* Window update sack */
4248 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4249 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4250 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4251 /* SWS sender side engages */
4252 asoc->peers_rwnd = 0;
4254 if (asoc->peers_rwnd > old_rwnd) {
4259 /* First setup for CC stuff */
4260 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4261 net->prev_cwnd = net->cwnd;
4266 * CMT: Reset CUC and Fast recovery algo variables before
4269 net->new_pseudo_cumack = 0;
4270 net->will_exit_fast_recovery = 0;
/* Strict-sack checking: abort on a cum-ack beyond anything we sent. */
4272 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4275 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4276 tp1 = TAILQ_LAST(&asoc->sent_queue,
4277 sctpchunk_listhead);
4278 send_s = tp1->rec.data.TSN_seq + 1;
4280 send_s = asoc->sending_seq;
4282 if ((cumack == send_s) ||
4283 compare_with_wrap(cumack, send_s, MAX_TSN)) {
4289 panic("Impossible sack 1");
4293 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4294 0, M_DONTWAIT, 1, MT_DATA);
4296 struct sctp_paramhdr *ph;
4299 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4301 ph = mtod(oper, struct sctp_paramhdr *);
4302 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4303 ph->param_length = htons(SCTP_BUF_LEN(oper));
4304 ippp = (uint32_t *) (ph + 1);
4305 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4307 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4308 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4313 asoc->this_sack_highest_gap = cumack;
4314 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4315 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4316 stcb->asoc.overall_error_count,
4318 SCTP_FROM_SCTP_INDATA,
/* A valid SACK is evidence of peer liveness: clear the error count. */
4321 stcb->asoc.overall_error_count = 0;
4322 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4323 /* process the new consecutive TSN first */
4324 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4326 tp2 = TAILQ_NEXT(tp1, sctp_next);
4327 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4329 cumack == tp1->rec.data.TSN_seq) {
4330 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4331 printf("Warning, an unsent is now acked?\n");
4334 * ECN Nonce: Add the nonce to the sender's
4337 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4338 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4340 * If it is less than ACKED, it is
4341 * now no-longer in flight. Higher
4342 * values may occur during marking
4344 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4345 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4346 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4347 tp1->whoTo->flight_size,
4349 (uintptr_t) tp1->whoTo,
4350 tp1->rec.data.TSN_seq);
4352 sctp_flight_size_decrease(tp1);
4353 /* sa_ignore NO_NULL_CHK */
4354 sctp_total_flight_decrease(stcb, tp1);
4356 tp1->whoTo->net_ack += tp1->send_size;
4357 if (tp1->snd_count < 2) {
4359 * True non-retransmited
4362 tp1->whoTo->net_ack2 +=
4365 /* update RTO too? */
4372 sctp_calculate_rto(stcb,
4374 &tp1->sent_rcv_time,
4375 sctp_align_safe_nocopy);
4380 * CMT: CUCv2 algorithm. From the
4381 * cumack'd TSNs, for each TSN being
4382 * acked for the first time, set the
4383 * following variables for the
4384 * corresp destination.
4385 * new_pseudo_cumack will trigger a
4387 * find_(rtx_)pseudo_cumack will
4388 * trigger search for the next
4389 * expected (rtx-)pseudo-cumack.
4391 tp1->whoTo->new_pseudo_cumack = 1;
4392 tp1->whoTo->find_pseudo_cumack = 1;
4393 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4396 /* sa_ignore NO_NULL_CHK */
4397 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4400 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4401 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4403 if (tp1->rec.data.chunk_was_revoked) {
4404 /* deflate the cwnd */
4405 tp1->whoTo->cwnd -= tp1->book_size;
4406 tp1->rec.data.chunk_was_revoked = 0;
/* Fully acked: unlink, free mbuf data and the chunk itself. */
4408 tp1->sent = SCTP_DATAGRAM_ACKED;
4409 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4411 /* sa_ignore NO_NULL_CHK */
4412 sctp_free_bufspace(stcb, asoc, tp1, 1);
4413 sctp_m_freem(tp1->data);
4415 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4416 sctp_log_sack(asoc->last_acked_seq,
4418 tp1->rec.data.TSN_seq,
4421 SCTP_LOG_FREE_SENT);
4424 asoc->sent_queue_cnt--;
4425 sctp_free_a_chunk(stcb, tp1);
/* Wake the sending socket now that send-buffer space was freed. */
4433 /* sa_ignore NO_NULL_CHK */
4434 if (stcb->sctp_socket) {
4435 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4440 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4441 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4442 /* sa_ignore NO_NULL_CHK */
4443 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4445 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4446 so = SCTP_INP_SO(stcb->sctp_ep);
4447 atomic_add_int(&stcb->asoc.refcnt, 1);
4448 SCTP_TCB_UNLOCK(stcb);
4449 SCTP_SOCKET_LOCK(so, 1);
4450 SCTP_TCB_LOCK(stcb);
4451 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4452 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4453 /* assoc was freed while we were unlocked */
4454 SCTP_SOCKET_UNLOCK(so, 1);
4458 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4459 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4460 SCTP_SOCKET_UNLOCK(so, 1);
4463 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4464 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4468 /* JRS - Use the congestion control given in the CC module */
4469 if (asoc->last_acked_seq != cumack)
4470 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4472 asoc->last_acked_seq = cumack;
4474 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4475 /* nothing left in-flight */
4476 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4477 net->flight_size = 0;
4478 net->partial_bytes_acked = 0;
4480 asoc->total_flight = 0;
4481 asoc->total_flight_count = 0;
4483 /* Fix up the a-p-a-p for future PR-SCTP sends */
4484 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4485 asoc->advanced_peer_ack_point = cumack;
4487 /* ECN Nonce updates */
4488 if (asoc->ecn_nonce_allowed) {
4489 if (asoc->nonce_sum_check) {
/* Nonce mismatch: wait one window; if it persists, assume a
 * misbehaving peer and disable ECN/nonce entirely. */
4490 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4491 if (asoc->nonce_wait_for_ecne == 0) {
4492 struct sctp_tmit_chunk *lchk;
4494 lchk = TAILQ_FIRST(&asoc->send_queue);
4495 asoc->nonce_wait_for_ecne = 1;
4497 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4499 asoc->nonce_wait_tsn = asoc->sending_seq;
4502 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4503 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4505 * Misbehaving peer. We need
4506 * to react to this guy
4508 asoc->ecn_allowed = 0;
4509 asoc->ecn_nonce_allowed = 0;
4514 /* See if Resynchronization Possible */
4515 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4516 asoc->nonce_sum_check = 1;
4518 * now we must calculate what the base is.
4519 * We do this based on two things, we know
4520 * the total's for all the segments
4521 * gap-acked in the SACK (none), We also
4522 * know the SACK's nonce sum, its in
4523 * nonce_sum_flag. So we can build a truth
4524 * table to back-calculate the new value of
4525 * asoc->nonce_sum_expect_base:
4527 * SACK-flag-Value Seg-Sums Base 0 0 0
4531 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute peer rwnd from the advertised window minus flight. */
4536 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4537 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4538 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4539 /* SWS sender side engages */
4540 asoc->peers_rwnd = 0;
4542 if (asoc->peers_rwnd > old_rwnd) {
4543 win_probe_recovery = 1;
4545 /* Now assure a timer where data is queued at */
4548 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4549 if (win_probe_recovery && (net->window_probe)) {
4550 net->window_probe = 0;
4551 win_probe_recovered = 1;
4553 * Find first chunk that was used with window probe
4554 * and clear the sent
4556 /* sa_ignore FREED_MEMORY */
4557 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4558 if (tp1->window_probe) {
4559 /* move back to data send queue */
4560 sctp_window_probe_recovery(stcb, asoc, net, tp1);
/* Per destination: (re)start T3-rtx when data is in flight,
 * otherwise stop any pending send/early-FR timers. */
4565 if (net->flight_size) {
4568 if (net->RTO == 0) {
4569 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4571 to_ticks = MSEC_TO_TICKS(net->RTO);
4574 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4575 sctp_timeout_handler, &net->rxt_timer);
4577 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4578 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4580 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4582 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4583 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4584 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4585 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4586 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Inconsistent accounting detected: audit and rebuild flight
 * counters from the sent_queue state. */
4592 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4593 (asoc->sent_queue_retran_cnt == 0) &&
4594 (win_probe_recovered == 0) &&
4596 /* huh, this should not happen */
4597 sctp_fs_audit(asoc);
4598 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4599 net->flight_size = 0;
4601 asoc->total_flight = 0;
4602 asoc->total_flight_count = 0;
4603 asoc->sent_queue_retran_cnt = 0;
4604 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4605 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4606 sctp_flight_size_increase(tp1);
4607 sctp_total_flight_increase(stcb, tp1);
4608 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4609 asoc->sent_queue_retran_cnt++;
4615 /**********************************/
4616 /* Now what about shutdown issues */
4617 /**********************************/
4618 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4619 /* nothing left on sendqueue.. consider done */
4621 if ((asoc->stream_queue_cnt == 1) &&
4622 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4623 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4624 (asoc->locked_on_sending)
4626 struct sctp_stream_queue_pending *sp;
4629 * I may be in a state where we got all across.. but
4630 * cannot write more due to a shutdown... we abort
4631 * since the user did not indicate EOR in this case.
4632 * The sp will be cleaned during free of the asoc.
4634 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4636 if ((sp) && (sp->length == 0)) {
4637 /* Let cleanup code purge it */
4638 if (sp->msg_is_complete) {
4639 asoc->stream_queue_cnt--;
4641 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4642 asoc->locked_on_sending = NULL;
4643 asoc->stream_queue_cnt--;
4647 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4648 (asoc->stream_queue_cnt == 0)) {
4649 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4650 /* Need to abort here */
4656 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4657 0, M_DONTWAIT, 1, MT_DATA);
4659 struct sctp_paramhdr *ph;
4662 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4664 ph = mtod(oper, struct sctp_paramhdr *);
4665 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4666 ph->param_length = htons(SCTP_BUF_LEN(oper));
4667 ippp = (uint32_t *) (ph + 1);
4668 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4670 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4671 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4673 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4674 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4675 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4677 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4678 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4679 sctp_stop_timers_for_shutdown(stcb);
4680 sctp_send_shutdown(stcb,
4681 stcb->asoc.primary_destination);
4682 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4683 stcb->sctp_ep, stcb, asoc->primary_destination);
4684 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4685 stcb->sctp_ep, stcb, asoc->primary_destination);
4687 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4688 (asoc->stream_queue_cnt == 0)) {
4689 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4692 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4693 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4694 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4695 sctp_send_shutdown_ack(stcb,
4696 stcb->asoc.primary_destination);
4698 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4699 stcb->sctp_ep, stcb, asoc->primary_destination);
4702 /* PR-Sctp issues need to be addressed too */
4703 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4704 struct sctp_tmit_chunk *lchk;
4705 uint32_t old_adv_peer_ack_point;
4707 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4708 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4709 /* C3. See if we need to send a Fwd-TSN */
4710 if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4713 * ISSUE with ECN, see FWD-TSN processing for notes
4714 * on issues that will occur when the ECN NONCE
4715 * stuff is put into SCTP for cross checking.
4717 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4719 send_forward_tsn(stcb, asoc);
4721 * ECN Nonce: Disable Nonce Sum check when
4722 * FWD TSN is sent and store resync tsn
4724 asoc->nonce_sum_check = 0;
4725 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4729 /* Assure a timer is up */
4730 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4731 stcb->sctp_ep, stcb, lchk->whoTo);
4734 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4735 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4737 stcb->asoc.peers_rwnd,
4738 stcb->asoc.total_flight,
4739 stcb->asoc.total_output_queue_size);
/*
 * Slow-path SACK processing for an association (RFC 4960 section 6.2.1).
 * Walks the cumulative ack, gap-ack blocks and duplicate-TSN reports of the
 * SACK chunk, updates per-destination flight/cwnd state, frees newly acked
 * chunks, handles shutdown transitions and PR-SCTP FWD-TSN advancement.
 * Caller must hold the TCB lock (asserted below).
 *
 * NOTE(review): this extract has lines elided by the tooling (the leading
 * numbers are the original file's line numbers and gaps mean missing lines,
 * e.g. closing braces and some conditions). Code below is preserved
 * byte-identical; comments only were added. Consult the full sctp_indata.c
 * before acting on any structural observation here.
 */
4744 sctp_handle_sack(struct mbuf *m, int offset,
4745 struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4746 struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4748 struct sctp_association *asoc;
4749 struct sctp_sack *sack;
4750 struct sctp_tmit_chunk *tp1, *tp2;
4751 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4752 this_sack_lowest_newack;
4753 uint32_t sav_cum_ack;
4754 uint16_t num_seg, num_dup;
4755 uint16_t wake_him = 0;
4756 unsigned int sack_length;
4757 uint32_t send_s = 0;
4759 int accum_moved = 0;
4760 int will_exit_fast_recovery = 0;
4761 uint32_t a_rwnd, old_rwnd;
4762 int win_probe_recovery = 0;
4763 int win_probe_recovered = 0;
4764 struct sctp_nets *net = NULL;
4765 int nonce_sum_flag, ecn_seg_sums = 0;
4767 uint8_t reneged_all = 0;
4768 uint8_t cmt_dac_flag;
4771 * we take any chance we can to service our queues since we cannot
4772 * get awoken when the socket is read from :<
4775 * Now perform the actual SACK handling: 1) Verify that it is not an
4776 * old sack, if so discard. 2) If there is nothing left in the send
4777 * queue (cum-ack is equal to last acked) then you have a duplicate
4778 * too, update any rwnd change and verify no timers are running.
4779 * then return. 3) Process any new consequtive data i.e. cum-ack
4780 * moved process these first and note that it moved. 4) Process any
4781 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4782 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4783 * sync up flightsizes and things, stop all timers and also check
4784 * for shutdown_pending state. If so then go ahead and send off the
4785 * shutdown. If in shutdown recv, send off the shutdown-ack and
4786 * start that timer, Ret. 9) Strike any non-acked things and do FR
4787 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4788 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4789 * if in shutdown_recv state.
4791 SCTP_TCB_LOCK_ASSERT(stcb);
4794 this_sack_lowest_newack = 0;
4796 sack_length = (unsigned int)sack_len;
4798 SCTP_STAT_INCR(sctps_slowpath_sack);
/* Pull the SACK fields (network byte order) and optional debug logging. */
4799 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4800 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4801 #ifdef SCTP_ASOCLOG_OF_TSNS
4802 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4803 stcb->asoc.cumack_log_at++;
4804 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4805 stcb->asoc.cumack_log_at = 0;
4808 num_seg = ntohs(sack->num_gap_ack_blks);
4811 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4812 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4813 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4816 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4817 num_dup = ntohs(sack->num_dup_tsns);
4819 old_rwnd = stcb->asoc.peers_rwnd;
4820 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4821 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4822 stcb->asoc.overall_error_count,
4824 SCTP_FROM_SCTP_INDATA,
/* A SACK arrival is proof of peer reachability: clear the error count. */
4827 stcb->asoc.overall_error_count = 0;
4829 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4830 sctp_log_sack(asoc->last_acked_seq,
/* Optional FR/early-FR logging of the reported duplicate TSNs; bounds-checks
 * the dup-TSN region against the received chunk length before walking it. */
4837 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4838 int off_to_dup, iii;
4839 uint32_t *dupdata, dblock;
4841 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4842 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4843 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4844 sizeof(uint32_t), (uint8_t *) & dblock);
4845 off_to_dup += sizeof(uint32_t);
4847 for (iii = 0; iii < num_dup; iii++) {
4848 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4849 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4850 sizeof(uint32_t), (uint8_t *) & dblock);
4851 if (dupdata == NULL)
4853 off_to_dup += sizeof(uint32_t);
4857 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4858 off_to_dup, num_dup, sack_length, num_seg);
/* Strict-SACK sanity check: the cum-ack may never reach TSNs we have not
 * yet sent; such a peer is aborted with a protocol-violation cause. */
4861 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4863 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4864 tp1 = TAILQ_LAST(&asoc->sent_queue,
4865 sctpchunk_listhead);
4866 send_s = tp1->rec.data.TSN_seq + 1;
4868 send_s = asoc->sending_seq;
4870 if (cum_ack == send_s ||
4871 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4878 panic("Impossible sack 1");
4883 * no way, we have not even sent this TSN out yet.
4884 * Peer is hopelessly messed up with us.
4889 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4890 0, M_DONTWAIT, 1, MT_DATA);
4892 struct sctp_paramhdr *ph;
4895 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4897 ph = mtod(oper, struct sctp_paramhdr *);
4898 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4899 ph->param_length = htons(SCTP_BUF_LEN(oper));
4900 ippp = (uint32_t *) (ph + 1);
4901 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4903 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4904 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4909 /**********************/
4910 /* 1) check the range */
4911 /**********************/
4912 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4913 /* acking something behind */
4916 sav_cum_ack = asoc->last_acked_seq;
4918 /* update the Rwnd of the peer */
4919 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4920 TAILQ_EMPTY(&asoc->send_queue) &&
4921 (asoc->stream_queue_cnt == 0)
4923 /* nothing left on send/sent and strmq */
4924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4925 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4926 asoc->peers_rwnd, 0, 0, a_rwnd);
4928 asoc->peers_rwnd = a_rwnd;
4929 if (asoc->sent_queue_retran_cnt) {
4930 asoc->sent_queue_retran_cnt = 0;
4932 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4933 /* SWS sender side engages */
4934 asoc->peers_rwnd = 0;
4936 /* stop any timers */
4937 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4938 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4939 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4940 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4941 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4942 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4943 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4944 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4947 net->partial_bytes_acked = 0;
4948 net->flight_size = 0;
4950 asoc->total_flight = 0;
4951 asoc->total_flight_count = 0;
4955 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4956 * things. The total byte count acked is tracked in netAckSz AND
4957 * netAck2 is used to track the total bytes acked that are un-
4958 * amibguious and were never retransmitted. We track these on a per
4959 * destination address basis.
4961 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4962 net->prev_cwnd = net->cwnd;
4967 * CMT: Reset CUC and Fast recovery algo variables before
4970 net->new_pseudo_cumack = 0;
4971 net->will_exit_fast_recovery = 0;
/* Walk the sent queue from its head and mark everything at or below the
 * new cum-ack as acked, releasing it from flight accounting. */
4973 /* process the new consecutive TSN first */
4974 tp1 = TAILQ_FIRST(&asoc->sent_queue);
4976 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4978 last_tsn == tp1->rec.data.TSN_seq) {
4979 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4981 * ECN Nonce: Add the nonce to the sender's
4984 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4986 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4988 * If it is less than ACKED, it is
4989 * now no-longer in flight. Higher
4990 * values may occur during marking
4992 if ((tp1->whoTo->dest_state &
4993 SCTP_ADDR_UNCONFIRMED) &&
4994 (tp1->snd_count < 2)) {
4996 * If there was no retran
4997 * and the address is
4998 * un-confirmed and we sent
5000 * sacked.. its confirmed,
5003 tp1->whoTo->dest_state &=
5004 ~SCTP_ADDR_UNCONFIRMED;
5006 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5008 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
5009 tp1->whoTo->flight_size,
5011 (uintptr_t) tp1->whoTo,
5012 tp1->rec.data.TSN_seq);
5014 sctp_flight_size_decrease(tp1);
5015 sctp_total_flight_decrease(stcb, tp1);
5017 tp1->whoTo->net_ack += tp1->send_size;
5019 /* CMT SFR and DAC algos */
5020 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
5021 tp1->whoTo->saw_newack = 1;
5023 if (tp1->snd_count < 2) {
5025 * True non-retransmited
5028 tp1->whoTo->net_ack2 +=
5031 /* update RTO too? */
/* RTT samples are only valid for never-retransmitted chunks (Karn). */
5034 sctp_calculate_rto(stcb,
5036 &tp1->sent_rcv_time,
5037 sctp_align_safe_nocopy);
5042 * CMT: CUCv2 algorithm. From the
5043 * cumack'd TSNs, for each TSN being
5044 * acked for the first time, set the
5045 * following variables for the
5046 * corresp destination.
5047 * new_pseudo_cumack will trigger a
5049 * find_(rtx_)pseudo_cumack will
5050 * trigger search for the next
5051 * expected (rtx-)pseudo-cumack.
5053 tp1->whoTo->new_pseudo_cumack = 1;
5054 tp1->whoTo->find_pseudo_cumack = 1;
5055 tp1->whoTo->find_rtx_pseudo_cumack = 1;
5058 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5059 sctp_log_sack(asoc->last_acked_seq,
5061 tp1->rec.data.TSN_seq,
5064 SCTP_LOG_TSN_ACKED);
5066 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
5067 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
5070 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5071 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
5072 #ifdef SCTP_AUDITING_ENABLED
5073 sctp_audit_log(0xB3,
5074 (asoc->sent_queue_retran_cnt & 0x000000ff));
5077 if (tp1->rec.data.chunk_was_revoked) {
5078 /* deflate the cwnd */
5079 tp1->whoTo->cwnd -= tp1->book_size;
5080 tp1->rec.data.chunk_was_revoked = 0;
5082 tp1->sent = SCTP_DATAGRAM_ACKED;
5087 tp1 = TAILQ_NEXT(tp1, sctp_next);
5089 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
5090 /* always set this up to cum-ack */
5091 asoc->this_sack_highest_gap = last_tsn;
5093 /* Move offset up to point to gaps/dups */
5094 offset += sizeof(struct sctp_sack_chunk);
5095 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
5097 /* skip corrupt segments */
5103 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
5104 * to be greater than the cumack. Also reset saw_newack to 0
5107 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5108 net->saw_newack = 0;
5109 net->this_sack_highest_newack = last_tsn;
5113 * thisSackHighestGap will increase while handling NEW
5114 * segments this_sack_highest_newack will increase while
5115 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
5116 * used for CMT DAC algo. saw_newack will also change.
5118 sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
5119 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
5120 num_seg, &ecn_seg_sums);
5122 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
5124 * validate the biggest_tsn_acked in the gap acks if
5125 * strict adherence is wanted.
5127 if ((biggest_tsn_acked == send_s) ||
5128 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
5130 * peer is either confused or we are under
5131 * attack. We must abort.
5138 /*******************************************/
5139 /* cancel ALL T3-send timer if accum moved */
5140 /*******************************************/
5141 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
5142 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5143 if (net->new_pseudo_cumack)
5144 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5146 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
5151 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5152 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5153 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
5157 /********************************************/
5158 /* drop the acked chunks from the sendqueue */
5159 /********************************************/
5160 asoc->last_acked_seq = cum_ack;
5162 tp1 = TAILQ_FIRST(&asoc->sent_queue);
5166 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
5170 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
5171 /* no more sent on list */
5172 printf("Warning, tp1->sent == %d and its now acked?\n",
5175 tp2 = TAILQ_NEXT(tp1, sctp_next);
5176 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
5177 if (tp1->pr_sctp_on) {
5178 if (asoc->pr_sctp_cnt != 0)
5179 asoc->pr_sctp_cnt--;
5181 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
5182 (asoc->total_flight > 0)) {
5184 panic("Warning flight size is postive and should be 0");
5186 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
5187 asoc->total_flight);
5189 asoc->total_flight = 0;
5192 /* sa_ignore NO_NULL_CHK */
5193 sctp_free_bufspace(stcb, asoc, tp1, 1);
5194 sctp_m_freem(tp1->data);
5195 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5196 asoc->sent_queue_cnt_removeable--;
5199 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5200 sctp_log_sack(asoc->last_acked_seq,
5202 tp1->rec.data.TSN_seq,
5205 SCTP_LOG_FREE_SENT);
5208 asoc->sent_queue_cnt--;
5209 sctp_free_a_chunk(stcb, tp1);
5212 } while (tp1 != NULL);
/* Space freed on the send socket buffer: wake any blocked writer. On
 * Apple/lock-testing builds the TCB lock must be dropped to take the
 * socket lock, so the assoc is ref-counted across the unlock window. */
5215 /* sa_ignore NO_NULL_CHK */
5216 if ((wake_him) && (stcb->sctp_socket)) {
5217 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5221 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
5222 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5223 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
5225 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5226 so = SCTP_INP_SO(stcb->sctp_ep);
5227 atomic_add_int(&stcb->asoc.refcnt, 1);
5228 SCTP_TCB_UNLOCK(stcb);
5229 SCTP_SOCKET_LOCK(so, 1);
5230 SCTP_TCB_LOCK(stcb);
5231 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5232 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5233 /* assoc was freed while we were unlocked */
5234 SCTP_SOCKET_UNLOCK(so, 1);
5238 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
5239 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5240 SCTP_SOCKET_UNLOCK(so, 1);
5243 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5244 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
5248 if (asoc->fast_retran_loss_recovery && accum_moved) {
5249 if (compare_with_wrap(asoc->last_acked_seq,
5250 asoc->fast_recovery_tsn, MAX_TSN) ||
5251 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
5252 /* Setup so we will exit RFC2582 fast recovery */
5253 will_exit_fast_recovery = 1;
5257 * Check for revoked fragments:
5259 * if Previous sack - Had no frags then we can't have any revoked if
5260 * Previous sack - Had frag's then - If we now have frags aka
5261 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
5262 * some of them. else - The peer revoked all ACKED fragments, since
5263 * we had some before and now we have NONE.
5267 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
5268 else if (asoc->saw_sack_with_frags) {
5269 int cnt_revoked = 0;
5271 tp1 = TAILQ_FIRST(&asoc->sent_queue);
5273 /* Peer revoked all dg's marked or acked */
5274 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5275 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
5276 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
5277 tp1->sent = SCTP_DATAGRAM_SENT;
5278 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5279 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
5280 tp1->whoTo->flight_size,
5282 (uintptr_t) tp1->whoTo,
5283 tp1->rec.data.TSN_seq);
5285 sctp_flight_size_increase(tp1);
5286 sctp_total_flight_increase(stcb, tp1);
5287 tp1->rec.data.chunk_was_revoked = 1;
5289 * To ensure that this increase in
5290 * flightsize, which is artificial,
5291 * does not throttle the sender, we
5292 * also increase the cwnd
5295 tp1->whoTo->cwnd += tp1->book_size;
5303 asoc->saw_sack_with_frags = 0;
5306 asoc->saw_sack_with_frags = 1;
5308 asoc->saw_sack_with_frags = 0;
5310 /* JRS - Use the congestion control given in the CC module */
5311 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5313 if (TAILQ_EMPTY(&asoc->sent_queue)) {
5314 /* nothing left in-flight */
5315 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5316 /* stop all timers */
5317 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5318 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5319 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5320 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5321 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5324 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5325 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5326 net->flight_size = 0;
5327 net->partial_bytes_acked = 0;
5329 asoc->total_flight = 0;
5330 asoc->total_flight_count = 0;
5332 /**********************************/
5333 /* Now what about shutdown issues */
5334 /**********************************/
5335 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5336 /* nothing left on sendqueue.. consider done */
5337 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5338 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5339 asoc->peers_rwnd, 0, 0, a_rwnd);
5341 asoc->peers_rwnd = a_rwnd;
5342 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5343 /* SWS sender side engages */
5344 asoc->peers_rwnd = 0;
5347 if ((asoc->stream_queue_cnt == 1) &&
5348 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5349 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5350 (asoc->locked_on_sending)
5352 struct sctp_stream_queue_pending *sp;
5355 * I may be in a state where we got all across.. but
5356 * cannot write more due to a shutdown... we abort
5357 * since the user did not indicate EOR in this case.
5359 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5361 if ((sp) && (sp->length == 0)) {
5362 asoc->locked_on_sending = NULL;
5363 if (sp->msg_is_complete) {
5364 asoc->stream_queue_cnt--;
5366 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5367 asoc->stream_queue_cnt--;
5371 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5372 (asoc->stream_queue_cnt == 0)) {
5373 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5374 /* Need to abort here */
5380 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5381 0, M_DONTWAIT, 1, MT_DATA);
5383 struct sctp_paramhdr *ph;
5386 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5388 ph = mtod(oper, struct sctp_paramhdr *);
5389 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5390 ph->param_length = htons(SCTP_BUF_LEN(oper));
5391 ippp = (uint32_t *) (ph + 1);
5392 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5394 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5395 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5398 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5399 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5400 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5402 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5403 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5404 sctp_stop_timers_for_shutdown(stcb);
5405 sctp_send_shutdown(stcb,
5406 stcb->asoc.primary_destination);
5407 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5408 stcb->sctp_ep, stcb, asoc->primary_destination);
5409 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5410 stcb->sctp_ep, stcb, asoc->primary_destination);
5413 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5414 (asoc->stream_queue_cnt == 0)) {
5415 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5418 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5419 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5420 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5421 sctp_send_shutdown_ack(stcb,
5422 stcb->asoc.primary_destination);
5424 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5425 stcb->sctp_ep, stcb, asoc->primary_destination);
5430 * Now here we are going to recycle net_ack for a different use...
5433 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5438 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5439 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5440 * automatically ensure that.
5442 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5443 this_sack_lowest_newack = cum_ack;
5446 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5447 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5449 /*********************************************/
5450 /* Here we perform PR-SCTP procedures */
5452 /*********************************************/
5453 /* C1. update advancedPeerAckPoint */
5454 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5455 asoc->advanced_peer_ack_point = cum_ack;
5457 /* JRS - Use the congestion control given in the CC module */
5458 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5460 /******************************************************************
5461 * Here we do the stuff with ECN Nonce checking.
5462 * We basically check to see if the nonce sum flag was incorrect
5463 * or if resynchronization needs to be done. Also if we catch a
5464 * misbehaving receiver we give him the kick.
5465 ******************************************************************/
5467 if (asoc->ecn_nonce_allowed) {
5468 if (asoc->nonce_sum_check) {
5469 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5470 if (asoc->nonce_wait_for_ecne == 0) {
5471 struct sctp_tmit_chunk *lchk;
5473 lchk = TAILQ_FIRST(&asoc->send_queue);
5474 asoc->nonce_wait_for_ecne = 1;
5476 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5478 asoc->nonce_wait_tsn = asoc->sending_seq;
5481 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5482 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5484 * Misbehaving peer. We need
5485 * to react to this guy
5487 asoc->ecn_allowed = 0;
5488 asoc->ecn_nonce_allowed = 0;
5493 /* See if Resynchronization Possible */
5494 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5495 asoc->nonce_sum_check = 1;
5497 * now we must calculate what the base is.
5498 * We do this based on two things, we know
5499 * the total's for all the segments
5500 * gap-acked in the SACK, its stored in
5501 * ecn_seg_sums. We also know the SACK's
5502 * nonce sum, its in nonce_sum_flag. So we
5503 * can build a truth table to back-calculate
5505 * asoc->nonce_sum_expect_base:
5507 * SACK-flag-Value Seg-Sums Base 0 0 0
5511 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5515 /* Now are we exiting loss recovery ? */
5516 if (will_exit_fast_recovery) {
5517 /* Ok, we must exit fast recovery */
5518 asoc->fast_retran_loss_recovery = 0;
5520 if ((asoc->sat_t3_loss_recovery) &&
5521 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5523 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5524 /* end satellite t3 loss recovery */
5525 asoc->sat_t3_loss_recovery = 0;
5530 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5531 if (net->will_exit_fast_recovery) {
5532 /* Ok, we must exit fast recovery */
5533 net->fast_retran_loss_recovery = 0;
5537 /* Adjust and set the new rwnd value */
5538 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5539 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5540 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
/* peers_rwnd = advertised rwnd minus what we still have in flight
 * (plus per-chunk overhead), per RFC 4960 rwnd management. */
5542 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5543 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5544 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5545 /* SWS sender side engages */
5546 asoc->peers_rwnd = 0;
5548 if (asoc->peers_rwnd > old_rwnd) {
5549 win_probe_recovery = 1;
5552 * Now we must setup so we have a timer up for anyone with
5558 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5559 if (win_probe_recovery && (net->window_probe)) {
5560 net->window_probe = 0;
5561 win_probe_recovered = 1;
5563 * Find first chunk that was used with
5564 * window probe and clear the event. Put
5565 * it back into the send queue as if has
5568 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5569 if (tp1->window_probe) {
5570 sctp_window_probe_recovery(stcb, asoc, net, tp1);
5575 if (net->flight_size) {
5577 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5578 stcb->sctp_ep, stcb, net);
5580 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5581 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5583 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5585 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5586 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5587 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5588 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5589 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/* Flight-size audit: if accounting drifted, rebuild flight counters
 * from the sent queue. NOTE(review): the enclosing condition's first
 * line is elided in this extract. */
5595 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5596 (asoc->sent_queue_retran_cnt == 0) &&
5597 (win_probe_recovered == 0) &&
5599 /* huh, this should not happen */
5600 sctp_fs_audit(asoc);
5601 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5602 net->flight_size = 0;
5604 asoc->total_flight = 0;
5605 asoc->total_flight_count = 0;
5606 asoc->sent_queue_retran_cnt = 0;
5607 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5608 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5609 sctp_flight_size_increase(tp1);
5610 sctp_total_flight_increase(stcb, tp1);
5611 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5612 asoc->sent_queue_retran_cnt++;
5618 /* C2. try to further move advancedPeerAckPoint ahead */
5619 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5620 struct sctp_tmit_chunk *lchk;
5621 uint32_t old_adv_peer_ack_point;
5623 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5624 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5625 /* C3. See if we need to send a Fwd-TSN */
5626 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5629 * ISSUE with ECN, see FWD-TSN processing for notes
5630 * on issues that will occur when the ECN NONCE
5631 * stuff is put into SCTP for cross checking.
5633 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5635 send_forward_tsn(stcb, asoc);
5637 * ECN Nonce: Disable Nonce Sum check when
5638 * FWD TSN is sent and store resync tsn
5640 asoc->nonce_sum_check = 0;
5641 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5645 /* Assure a timer is up */
5646 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5647 stcb->sctp_ep, stcb, lchk->whoTo);
5650 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5651 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5653 stcb->asoc.peers_rwnd,
5654 stcb->asoc.total_flight,
5655 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative-TSN-ack carried in a received SHUTDOWN chunk
 * (RFC 4960 section 9.2): treat it as a SACK with no gap blocks. The
 * advertised rwnd argument is synthesized so the peer's window appears
 * unchanged, then the express (fast-path) SACK handler does the work.
 * NOTE(review): `netp` is unused in the visible lines; braces/return type
 * are elided by the extraction tooling.
 */
5660 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5661 struct sctp_nets *netp, int *abort_flag)
5664 uint32_t cum_ack, a_rwnd;
5666 cum_ack = ntohl(cp->cumulative_tsn_ack);
5667 /* Arrange so a_rwnd does NOT change */
5668 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5670 /* Now call the express sack handling */
5671 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
/*
 * After a FORWARD-TSN advances the cumulative TSN, flush the per-stream
 * reorder queue `strmin`: first deliver everything at or below the stream
 * sequence the FWD-TSN declared delivered, then resume normal in-order
 * delivery for whatever consecutive SSNs follow. Delivered chunks are
 * handed to the socket read queue and, when NR-SACK is negotiated, their
 * TSNs are marked in the non-renegable (nr) mapping array.
 * NOTE(review): this extract has elided lines (loop heads, braces, some
 * debug printf text); code preserved byte-identical, comments only added.
 */
5675 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5676 struct sctp_stream_in *strmin)
5678 struct sctp_queued_to_read *ctl, *nctl;
5679 struct sctp_association *asoc;
5682 /* EY -used to calculate nr_gap information */
5683 uint32_t nr_tsn, nr_gap;
5686 tt = strmin->last_sequence_delivered;
5688 * First deliver anything prior to and including the stream no that
/* Pass 1: everything with SSN <= tt was skipped over by the FWD-TSN and
 * is deliverable regardless of ordering. */
5691 ctl = TAILQ_FIRST(&strmin->inqueue);
5693 nctl = TAILQ_NEXT(ctl, next);
5694 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5695 (tt == ctl->sinfo_ssn)) {
5696 /* this is deliverable now */
5697 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5698 /* subtract pending on streams */
5699 asoc->size_on_all_streams -= ctl->length;
5700 sctp_ucount_decr(asoc->cnt_on_all_streams);
5701 /* deliver it to at least the delivery-q */
5702 if (stcb->sctp_socket) {
5703 /* EY need the tsn info for calculating nr */
5704 nr_tsn = ctl->sinfo_tsn;
5705 sctp_add_to_readq(stcb->sctp_ep, stcb,
5707 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5709 * EY this is the chunk that should be
5710 * tagged nr gapped calculate the gap and
5711 * such then tag this TSN nr
5712 * chk->rec.data.TSN_seq
5714 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
/* Gap = distance from the nr mapping array base, wrapping at MAX_TSN. */
5716 if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
5717 nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
5719 nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
5721 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5722 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5724 * EY These should never
5725 * happen- explained before
5728 SCTP_TCB_LOCK_ASSERT(stcb);
5729 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5730 if (nr_tsn > asoc->highest_tsn_inside_nr_map)
5731 asoc->highest_tsn_inside_nr_map = nr_tsn;
5734 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
5737 * sctp_kick_prsctp_reorder_q
5738 * ueue(7): Something wrong,
5739 * the TSN to be tagged"
5740 * "\nas NR is not even in
5741 * the mapping_array, or map
5746 * EY - not %100 sure about
5747 * the lock thing, don't
5748 * think its required
5751 * SCTP_TCB_LOCK_ASSERT(stcb)
5756 * printf("\nCalculating an
5757 * nr_gap!!\nmapping_array_si
5759 * nr_mapping_array_size =
5760 * %d" "\nmapping_array_base
5762 * nr_mapping_array_base =
5763 * %d\nhighest_tsn_inside_map
5765 * "highest_tsn_inside_nr_map
5766 * = %d\nTSN = %d nr_gap =
5767 * %d",asoc->mapping_array_si
5769 * asoc->nr_mapping_array_siz
5771 * asoc->mapping_array_base_t
5773 * asoc->nr_mapping_array_bas
5775 * asoc->highest_tsn_inside_m
5777 * asoc->highest_tsn_inside_n
5778 * r_map,tsn,nr_gap);
5784 /* no more delivery now. */
5790 * now we must deliver things in queue the normal way if any are
/* Pass 2: ordinary in-order delivery — hand up consecutive SSNs starting
 * right after the last delivered sequence, advancing tt as we go. */
5793 tt = strmin->last_sequence_delivered + 1;
5794 ctl = TAILQ_FIRST(&strmin->inqueue);
5796 nctl = TAILQ_NEXT(ctl, next);
5797 if (tt == ctl->sinfo_ssn) {
5798 /* this is deliverable now */
5799 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5800 /* subtract pending on streams */
5801 asoc->size_on_all_streams -= ctl->length;
5802 sctp_ucount_decr(asoc->cnt_on_all_streams);
5803 /* deliver it to at least the delivery-q */
5804 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5805 if (stcb->sctp_socket) {
5807 nr_tsn = ctl->sinfo_tsn;
5808 sctp_add_to_readq(stcb->sctp_ep, stcb,
5810 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5812 * EY this is the chunk that should be
5813 * tagged nr gapped calculate the gap and
5814 * such then tag this TSN nr
5815 * chk->rec.data.TSN_seq
5817 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5819 if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
5820 nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
5822 nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
5824 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5825 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5827 * EY These should never
5828 * happen, explained before
5831 SCTP_TCB_LOCK_ASSERT(stcb);
5832 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5833 if (nr_tsn > asoc->highest_tsn_inside_nr_map)
5834 asoc->highest_tsn_inside_nr_map = nr_tsn;
5838 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap))
5841 * sctp_kick_prsctp_reorder_q
5842 * ueue(8): Something wrong,
5843 * the TSN to be tagged"
5844 * "\nas NR is not even in
5845 * the mapping_array, or map
5850 * EY - not %100 sure about
5851 * the lock thing, don't
5852 * think its required
5855 * SCTP_TCB_LOCK_ASSERT(stcb)
5860 * printf("\nCalculating an
5861 * nr_gap!!\nmapping_array_si
5863 * nr_mapping_array_size =
5864 * %d" "\nmapping_array_base
5866 * nr_mapping_array_base =
5867 * %d\nhighest_tsn_inside_map
5869 * "highest_tsn_inside_nr_map
5870 * = %d\nTSN = %d nr_gap =
5871 * %d",asoc->mapping_array_si
5873 * asoc->nr_mapping_array_siz
5875 * asoc->mapping_array_base_t
5877 * asoc->nr_mapping_array_bas
5879 * asoc->highest_tsn_inside_m
5881 * asoc->highest_tsn_inside_n
5882 * r_map,tsn,nr_gap);
5887 tt = strmin->last_sequence_delivered + 1;
/*
 * sctp_handle_forward_tsn():
 * Data-receiver side processing of a PR-SCTP FORWARD-TSN chunk, as the
 * block comment below describes: (1) advance the local cumulative TSN
 * and the mapping array(s), (2) toss now-skipped chunks from the
 * re-assembly queue, and (3) walk the (stream, sequence) entries that
 * trail the chunk header and advance each stream's re-ordering queue.
 *
 * NOTE(review): this region of the file appears to have lines elided
 * (unbalanced braces, missing loop headers, truncated comments), so the
 * exact control flow cannot be read from this view alone -- verify any
 * change against the upstream sctp_indata.c before relying on it.
 *
 * Parameters as used below:
 *   stcb       - association control block; SCTP_TCB_LOCK_ASSERT shows
 *                the TCB lock is expected to be held.
 *   fwd        - the FORWARD-TSN chunk; its length is validated first.
 *   abort_flag - passed through to sctp_sack_check().
 *   m, offset  - mbuf chain and offset used with sctp_m_getptr() to
 *                read the trailing per-stream entries.
 */
5896 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5897 struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
5900 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5901 * forward TSN, when the SACK comes back that acknowledges the
5902 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5903 * get quite tricky since we may have sent more data interveneing
5904 * and must carefully account for what the SACK says on the nonce
5905 * and any gaps that are reported. This work will NOT be done here,
5906 * but I note it here since it is really related to PR-SCTP and
5910 /* The pr-sctp fwd tsn */
5912 * here we will perform all the data receiver side steps for
5913 * processing FwdTSN, as required in by pr-sctp draft:
5915 * Assume we get FwdTSN(x):
5917 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5918 * others we have 3) examine and update re-ordering queue on
5919 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5920 * report where we are.
5922 struct sctp_association *asoc;
5923 uint32_t new_cum_tsn, gap;
5924 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5925 struct sctp_stream_in *strm;
5926 struct sctp_tmit_chunk *chk, *at;
5928 cumack_set_flag = 0;
/* Sanity-check the chunk length before trusting fwd_sz any further. */
5931 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5932 SCTPDBG(SCTP_DEBUG_INDATA1,
5933 "Bad size too small/big fwd-tsn\n");
/* m_size is the mapping array capacity in bits (bytes << 3). */
5936 m_size = (stcb->asoc.mapping_array_size << 3);
5937 /*************************************************************/
5938 /* 1. Here we update local cumTSN and shift the bitmap array */
5939 /*************************************************************/
5940 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5942 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5943 asoc->cumulative_tsn == new_cum_tsn) {
5944 /* Already got there ... */
5947 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5949 asoc->highest_tsn_inside_map = new_cum_tsn;
5950 /* EY nr_mapping_array version of the above */
5952 * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
5953 * asoc->peer_supports_nr_sack)
5955 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5956 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5957 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5961 * now we know the new TSN is more advanced, let's find the actual
/*
 * Compute the gap between new_cum_tsn and the mapping array base,
 * handling TSN wraparound explicitly in the else-branch.
 */
5964 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
5966 (new_cum_tsn == asoc->mapping_array_base_tsn)) {
5967 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
5969 /* try to prevent underflow here */
5970 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5973 if (gap >= m_size) {
5974 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5975 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
/*
 * Gap exceeds the receive-window space we ever advertised:
 * treat the peer as faulty and abort the association with a
 * PROTOCOL_VIOLATION cause built in 'oper'.
 */
5977 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5981 * out of range (of single byte chunks in the rwnd I
5982 * give out). This must be an attacker.
5985 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5986 0, M_DONTWAIT, 1, MT_DATA);
5988 struct sctp_paramhdr *ph;
5991 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5992 (sizeof(uint32_t) * 3);
5993 ph = mtod(oper, struct sctp_paramhdr *);
5994 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5995 ph->param_length = htons(SCTP_BUF_LEN(oper));
5996 ippp = (uint32_t *) (ph + 1);
5997 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5999 *ippp = asoc->highest_tsn_inside_map;
6001 *ippp = new_cum_tsn;
6003 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
6004 sctp_abort_an_association(stcb->sctp_ep, stcb,
6005 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6008 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
/*
 * Slide the whole map: zero it and rebase both the regular and
 * (when nr-sack is enabled) the nr mapping arrays at new_cum_tsn + 1.
 */
6010 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
6011 cumack_set_flag = 1;
6012 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
6013 asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
6014 /* EY - nr_sack: nr_mapping_array version of the above */
6015 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
6016 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
6017 asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
6018 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6019 if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
6021 * printf("IN sctp_handle_forward_tsn:
6022 * Something is wrong the size of" "map and
6023 * nr_map should be equal!")
6027 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6028 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6030 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
6032 SCTP_TCB_LOCK_ASSERT(stcb);
6033 if ((compare_with_wrap(((uint32_t) asoc->cumulative_tsn + gap), asoc->highest_tsn_inside_map, MAX_TSN)) ||
6034 (((uint32_t) asoc->cumulative_tsn + gap) == asoc->highest_tsn_inside_map)) {
/* Mark every TSN up to and including the gap as present. */
6037 for (i = 0; i <= gap; i++) {
6038 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
6042 * Now after marking all, slide thing forward but no sack
6045 sctp_sack_check(stcb, 0, 0, abort_flag);
6050 /*************************************************************/
6051 /* 2. Clear up re-assembly queue */
6052 /*************************************************************/
6054 * First service it if pd-api is up, just in case we can progress it
6057 if (asoc->fragmented_delivery_inprogress) {
6058 sctp_service_reassembly(stcb, asoc);
6060 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
6061 /* For each one on here see if we need to toss it */
6063 * For now large messages held on the reasmqueue that are
6064 * complete will be tossed too. We could in theory do more
6065 * work to spin through and stop after dumping one msg aka
6066 * seeing the start of a new msg at the head, and call the
6067 * delivery function... to see if it can be delivered... But
6068 * for now we just dump everything on the queue.
6070 chk = TAILQ_FIRST(&asoc->reasmqueue);
6072 at = TAILQ_NEXT(chk, sctp_next);
/* Toss every reassembly chunk at or below the new cumulative TSN. */
6073 if (compare_with_wrap(asoc->cumulative_tsn,
6074 chk->rec.data.TSN_seq, MAX_TSN) ||
6075 asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
6076 /* It needs to be tossed */
6077 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
6078 if (compare_with_wrap(chk->rec.data.TSN_seq,
6079 asoc->tsn_last_delivered, MAX_TSN)) {
6080 asoc->tsn_last_delivered =
6081 chk->rec.data.TSN_seq;
6082 asoc->str_of_pdapi =
6083 chk->rec.data.stream_number;
6084 asoc->ssn_of_pdapi =
6085 chk->rec.data.stream_seq;
6086 asoc->fragment_flags =
6087 chk->rec.data.rcv_flags;
6089 asoc->size_on_reasm_queue -= chk->send_size;
6090 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
6093 /* Clear up any stream problem */
6094 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
6095 SCTP_DATA_UNORDERED &&
6096 (compare_with_wrap(chk->rec.data.stream_seq,
6097 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6100 * We must dump forward this streams
6101 * sequence number if the chunk is
6102 * not unordered that is being
6103 * skipped. There is a chance that
6104 * if the peer does not include the
6105 * last fragment in its FWD-TSN we
6106 * WILL have a problem here since
6107 * you would have a partial chunk in
6108 * queue that may not be
6109 * deliverable. Also if a Partial
6110 * delivery API as started the user
6111 * may get a partial chunk. The next
6112 * read returning a new chunk...
6113 * really ugly but I see no way
6114 * around it! Maybe a notify??
6116 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6117 chk->rec.data.stream_seq;
6120 sctp_m_freem(chk->data);
6123 sctp_free_a_chunk(stcb, chk);
6126 * Ok we have gone beyond the end of the
6127 * fwd-tsn's mark. Some checks...
6129 if ((asoc->fragmented_delivery_inprogress) &&
6130 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
6134 * Special case PD-API is up and
6135 * what we fwd-tsn' over includes
6136 * one that had the LAST_FRAG. We no
6137 * longer need to do the PD-API.
6139 asoc->fragmented_delivery_inprogress = 0;
6141 str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
6142 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
6143 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
6151 if (asoc->fragmented_delivery_inprogress) {
6153 * Ok we removed cnt_gone chunks in the PD-API queue that
6154 * were being delivered. So now we must turn off the flag.
6158 str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
6159 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
6160 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
6161 asoc->fragmented_delivery_inprogress = 0;
6163 /*************************************************************/
6164 /* 3. Update the PR-stream re-ordering queues */
6165 /*************************************************************/
/* The bytes past the fixed header are (stream, sequence) entries. */
6166 fwd_sz -= sizeof(*fwd);
6169 unsigned int num_str;
6170 struct sctp_strseq *stseq, strseqbuf;
6172 offset += sizeof(*fwd);
6174 num_str = fwd_sz / sizeof(struct sctp_strseq);
6175 for (i = 0; i < num_str; i++) {
6178 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
6179 sizeof(struct sctp_strseq),
6180 (uint8_t *) & strseqbuf);
6181 offset += sizeof(struct sctp_strseq);
6182 if (stseq == NULL) {
/* Convert the on-wire fields to host byte order in place. */
6186 st = ntohs(stseq->stream);
6188 st = ntohs(stseq->sequence);
6189 stseq->sequence = st;
6191 if (stseq->stream >= asoc->streamincnt) {
6192 /* screwed up streams, stop! */
6195 strm = &asoc->strmin[stseq->stream];
6196 if (compare_with_wrap(stseq->sequence,
6197 strm->last_sequence_delivered, MAX_SEQ)) {
6198 /* Update the sequence number */
6199 strm->last_sequence_delivered =
6202 /* now kick the stream the new way */
6203 /* sa_ignore NO_NULL_CHK */
6204 sctp_kick_prsctp_reorder_queue(stcb, strm);
6207 if (TAILQ_FIRST(&asoc->reasmqueue)) {
6208 /* now lets kick out and check for more fragmented delivery */
6209 /* sa_ignore NO_NULL_CHK */
6210 sctp_deliver_reasm_check(stcb, &stcb->asoc);
6214 /* EY: fully identical to sctp_express_handle_sack; duplicated only for the naming convention */
/*
 * sctp_express_handle_nr_sack():
 * Fast-path handler for an NR-SACK that carries only a new cumulative
 * ack (the comment preceding this function notes it mirrors
 * sctp_express_handle_sack). From the code below it: frees acked
 * chunks from the sent queue up to 'cumack', updates flight size,
 * peers_rwnd and ECN-nonce bookkeeping, wakes the sending socket,
 * restarts/stops per-net retransmission timers, and drives the
 * SHUTDOWN / SHUTDOWN-ACK sequence once the send queues drain.
 *
 * NOTE(review): lines appear to be elided in this region (unbalanced
 * braces, missing 'while'/'if' headers, truncated comments) -- confirm
 * the exact control flow against upstream sctp_indata.c before making
 * behavioral changes.
 *
 * Parameters as used below:
 *   stcb           - association control block (TCB lock asserted held).
 *   cumack         - the peer's new cumulative TSN ack.
 *   rwnd           - the peer's advertised receive window.
 *   nonce_sum_flag - ECN nonce sum bit from the chunk flags.
 *   abort_now      - presumably set when the association is aborted;
 *                    not visibly written in this view -- TODO confirm.
 */
6216 sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
6217 uint32_t rwnd, int nonce_sum_flag, int *abort_now)
6219 struct sctp_nets *net;
6220 struct sctp_association *asoc;
6221 struct sctp_tmit_chunk *tp1, *tp2;
6223 int win_probe_recovery = 0;
6224 int win_probe_recovered = 0;
6225 int j, done_once = 0;
6227 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
6228 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
6229 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
6231 SCTP_TCB_LOCK_ASSERT(stcb);
6232 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Ring-buffer log of received cumacks (debug build only). */
6233 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
6234 stcb->asoc.cumack_log_at++;
6235 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
6236 stcb->asoc.cumack_log_at = 0;
6240 old_rwnd = asoc->peers_rwnd;
/* Old/duplicate cumack: at most a window update, then done. */
6241 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
6244 } else if (asoc->last_acked_seq == cumack) {
6245 /* Window update sack */
6246 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6247 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6248 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6249 /* SWS sender side engages */
6250 asoc->peers_rwnd = 0;
6252 if (asoc->peers_rwnd > old_rwnd) {
6257 /* First setup for CC stuff */
6258 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6259 net->prev_cwnd = net->cwnd;
6264 * CMT: Reset CUC and Fast recovery algo variables before
6267 net->new_pseudo_cumack = 0;
6268 net->will_exit_fast_recovery = 0;
/*
 * Strict-sack checking: a cumack at or beyond the next TSN we would
 * send is impossible and is treated as a protocol violation.
 */
6270 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
6273 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
6274 tp1 = TAILQ_LAST(&asoc->sent_queue,
6275 sctpchunk_listhead);
6276 send_s = tp1->rec.data.TSN_seq + 1;
6278 send_s = asoc->sending_seq;
6280 if ((cumack == send_s) ||
6281 compare_with_wrap(cumack, send_s, MAX_TSN)) {
6287 panic("Impossible sack 1");
6291 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6292 0, M_DONTWAIT, 1, MT_DATA);
6294 struct sctp_paramhdr *ph;
6297 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6299 ph = mtod(oper, struct sctp_paramhdr *);
6300 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6301 ph->param_length = htons(SCTP_BUF_LEN(oper));
6302 ippp = (uint32_t *) (ph + 1);
6303 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
6305 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
6306 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6311 asoc->this_sack_highest_gap = cumack;
6312 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6313 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6314 stcb->asoc.overall_error_count,
6316 SCTP_FROM_SCTP_INDATA,
6319 stcb->asoc.overall_error_count = 0;
/*
 * New cumack: walk the sent queue and release every chunk whose TSN
 * is now cumulatively acked, updating flight/cwnd bookkeeping and RTO
 * samples along the way.
 */
6320 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
6321 /* process the new consecutive TSN first */
6322 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6324 tp2 = TAILQ_NEXT(tp1, sctp_next);
6325 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
6327 cumack == tp1->rec.data.TSN_seq) {
6328 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
6329 printf("Warning, an unsent is now acked?\n");
6332 * ECN Nonce: Add the nonce to the sender's
6335 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
6336 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
6338 * If it is less than ACKED, it is
6339 * now no-longer in flight. Higher
6340 * values may occur during marking
6342 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6343 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6344 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
6345 tp1->whoTo->flight_size,
6347 (uintptr_t) tp1->whoTo,
6348 tp1->rec.data.TSN_seq);
6350 sctp_flight_size_decrease(tp1);
6351 /* sa_ignore NO_NULL_CHK */
6352 sctp_total_flight_decrease(stcb, tp1);
6354 tp1->whoTo->net_ack += tp1->send_size;
6355 if (tp1->snd_count < 2) {
6357 * True non-retransmited
6360 tp1->whoTo->net_ack2 +=
6363 /* update RTO too? */
6370 sctp_calculate_rto(stcb,
6372 &tp1->sent_rcv_time,
6373 sctp_align_safe_nocopy);
6378 * CMT: CUCv2 algorithm. From the
6379 * cumack'd TSNs, for each TSN being
6380 * acked for the first time, set the
6381 * following variables for the
6382 * corresp destination.
6383 * new_pseudo_cumack will trigger a
6385 * find_(rtx_)pseudo_cumack will
6386 * trigger search for the next
6387 * expected (rtx-)pseudo-cumack.
6389 tp1->whoTo->new_pseudo_cumack = 1;
6390 tp1->whoTo->find_pseudo_cumack = 1;
6391 tp1->whoTo->find_rtx_pseudo_cumack = 1;
6393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6394 /* sa_ignore NO_NULL_CHK */
6395 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6398 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6399 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6401 if (tp1->rec.data.chunk_was_revoked) {
6402 /* deflate the cwnd */
6403 tp1->whoTo->cwnd -= tp1->book_size;
6404 tp1->rec.data.chunk_was_revoked = 0;
6406 tp1->sent = SCTP_DATAGRAM_ACKED;
6407 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
6409 /* sa_ignore NO_NULL_CHK */
6410 sctp_free_bufspace(stcb, asoc, tp1, 1);
6411 sctp_m_freem(tp1->data);
6413 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6414 sctp_log_sack(asoc->last_acked_seq,
6416 tp1->rec.data.TSN_seq,
6419 SCTP_LOG_FREE_SENT);
6422 asoc->sent_queue_cnt--;
6423 sctp_free_a_chunk(stcb, tp1);
/*
 * Wake the sending socket now that send-buffer space was freed.
 * The __APPLE__/SCTP_SO_LOCK_TESTING path drops the TCB lock to
 * take the socket lock, so the assoc may be freed meanwhile --
 * hence the refcnt hold and SCTP_STATE_CLOSED_SOCKET re-check.
 */
6431 /* sa_ignore NO_NULL_CHK */
6432 if (stcb->sctp_socket) {
6433 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6438 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
6439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6440 /* sa_ignore NO_NULL_CHK */
6441 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
6443 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6444 so = SCTP_INP_SO(stcb->sctp_ep);
6445 atomic_add_int(&stcb->asoc.refcnt, 1);
6446 SCTP_TCB_UNLOCK(stcb);
6447 SCTP_SOCKET_LOCK(so, 1);
6448 SCTP_TCB_LOCK(stcb);
6449 atomic_subtract_int(&stcb->asoc.refcnt, 1);
6450 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6451 /* assoc was freed while we were unlocked */
6452 SCTP_SOCKET_UNLOCK(so, 1);
6456 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
6457 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6458 SCTP_SOCKET_UNLOCK(so, 1);
6461 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6462 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
6466 /* JRS - Use the congestion control given in the CC module */
6467 if (asoc->last_acked_seq != cumack)
6468 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
6470 asoc->last_acked_seq = cumack;
6472 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6473 /* nothing left in-flight */
6474 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6475 net->flight_size = 0;
6476 net->partial_bytes_acked = 0;
6478 asoc->total_flight = 0;
6479 asoc->total_flight_count = 0;
6481 /* Fix up the a-p-a-p for future PR-SCTP sends */
6482 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
6483 asoc->advanced_peer_ack_point = cumack;
6485 /* ECN Nonce updates */
6486 if (asoc->ecn_nonce_allowed) {
6487 if (asoc->nonce_sum_check) {
6488 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
6489 if (asoc->nonce_wait_for_ecne == 0) {
6490 struct sctp_tmit_chunk *lchk;
6492 lchk = TAILQ_FIRST(&asoc->send_queue);
6493 asoc->nonce_wait_for_ecne = 1;
6495 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
6497 asoc->nonce_wait_tsn = asoc->sending_seq;
6500 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
6501 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
6503 * Misbehaving peer. We need
6504 * to react to this guy
6506 asoc->ecn_allowed = 0;
6507 asoc->ecn_nonce_allowed = 0;
6512 /* See if Resynchronization Possible */
6513 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
6514 asoc->nonce_sum_check = 1;
6516 * now we must calculate what the base is.
6517 * We do this based on two things, we know
6518 * the total's for all the segments
6519 * gap-acked in the SACK (none), We also
6520 * know the SACK's nonce sum, its in
6521 * nonce_sum_flag. So we can build a truth
6522 * table to back-calculate the new value of
6523 * asoc->nonce_sum_expect_base:
6525 * SACK-flag-Value Seg-Sums Base 0 0 0
6528 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
/* Recompute the usable peer window and apply SWS avoidance. */
6533 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6534 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6535 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6536 /* SWS sender side engages */
6537 asoc->peers_rwnd = 0;
6539 if (asoc->peers_rwnd > old_rwnd) {
6540 win_probe_recovery = 1;
6542 /* Now assure a timer where data is queued at */
6545 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6546 if (win_probe_recovery && (net->window_probe)) {
6547 net->window_probe = 0;
6548 win_probe_recovered = 1;
6550 * Find first chunk that was used with window probe
6551 * and clear the sent
6553 /* sa_ignore FREED_MEMORY */
6554 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6555 if (tp1->window_probe) {
6556 /* move back to data send queue */
6557 sctp_window_probe_recovery(stcb, asoc, net, tp1);
/* Restart the T3-rxt timer while data is outstanding on this net. */
6562 if (net->flight_size) {
6565 if (net->RTO == 0) {
6566 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
6568 to_ticks = MSEC_TO_TICKS(net->RTO);
6571 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6572 sctp_timeout_handler, &net->rxt_timer);
6574 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6575 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
6577 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
6579 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
6580 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6581 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
6582 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
6583 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
/*
 * Flight-size audit: if the books say nothing is in flight but the
 * sent queue is non-empty, rebuild the counters from the queue.
 */
6589 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
6590 (asoc->sent_queue_retran_cnt == 0) &&
6591 (win_probe_recovered == 0) &&
6593 /* huh, this should not happen */
6594 sctp_fs_audit(asoc);
6595 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6596 net->flight_size = 0;
6598 asoc->total_flight = 0;
6599 asoc->total_flight_count = 0;
6600 asoc->sent_queue_retran_cnt = 0;
6601 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6602 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6603 sctp_flight_size_increase(tp1);
6604 sctp_total_flight_increase(stcb, tp1);
6605 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6606 asoc->sent_queue_retran_cnt++;
6612 /**********************************/
6613 /* Now what about shutdown issues */
6614 /**********************************/
6615 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
6616 /* nothing left on sendqueue.. consider done */
6618 if ((asoc->stream_queue_cnt == 1) &&
6619 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6620 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
6621 (asoc->locked_on_sending)
6623 struct sctp_stream_queue_pending *sp;
6626 * I may be in a state where we got all across.. but
6627 * cannot write more due to a shutdown... we abort
6628 * since the user did not indicate EOR in this case.
6629 * The sp will be cleaned during free of the asoc.
6631 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
6633 if ((sp) && (sp->length == 0)) {
6634 /* Let cleanup code purge it */
6635 if (sp->msg_is_complete) {
6636 asoc->stream_queue_cnt--;
6638 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6639 asoc->locked_on_sending = NULL;
6640 asoc->stream_queue_cnt--;
6644 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
6645 (asoc->stream_queue_cnt == 0)) {
6646 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6647 /* Need to abort here */
6653 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6654 0, M_DONTWAIT, 1, MT_DATA);
6656 struct sctp_paramhdr *ph;
6659 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6661 ph = mtod(oper, struct sctp_paramhdr *);
6662 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6663 ph->param_length = htons(SCTP_BUF_LEN(oper));
6664 ippp = (uint32_t *) (ph + 1);
6665 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
6667 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
6668 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
/* Clean case: send SHUTDOWN and start the shutdown/guard timers. */
6670 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
6671 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
6672 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6674 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6675 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6676 sctp_stop_timers_for_shutdown(stcb);
6677 sctp_send_shutdown(stcb,
6678 stcb->asoc.primary_destination);
6679 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
6680 stcb->sctp_ep, stcb, asoc->primary_destination);
6681 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
6682 stcb->sctp_ep, stcb, asoc->primary_destination);
6684 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
6685 (asoc->stream_queue_cnt == 0)) {
6686 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6689 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6690 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
6691 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6692 sctp_send_shutdown_ack(stcb,
6693 stcb->asoc.primary_destination);
6695 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
6696 stcb->sctp_ep, stcb, asoc->primary_destination);
6699 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
6700 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
6702 stcb->asoc.peers_rwnd,
6703 stcb->asoc.total_flight,
6704 stcb->asoc.total_output_queue_size);
6708 /* EY: nr_sack version of sctp_handle_segments; nr-gapped TSNs get removed from the RtxQ in this method */
6710 sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
6711 struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
6712 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
6713 uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
6715 /************************************************/
6716 /* process fragments and update sendqueue */
6717 /************************************************/
6718 struct sctp_nr_sack *nr_sack;
6719 struct sctp_gap_ack_block *frag, block;
6720 struct sctp_nr_gap_ack_block *nr_frag, nr_block;
6721 struct sctp_tmit_chunk *tp1;
6722 uint32_t i, j, all_bit;
6727 uint16_t frag_strt, frag_end, primary_flag_set;
6728 uint16_t nr_frag_strt, nr_frag_end;
6730 uint32_t last_frag_high;
6731 uint32_t last_nr_frag_high;
6733 all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
6736 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
6738 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
6739 primary_flag_set = 1;
6741 primary_flag_set = 0;
6743 nr_sack = &ch->nr_sack;
6746 * EY! - I will process nr_gaps similarly,by going to this position
6747 * again if All bit is set
6749 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6750 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6751 *offset += sizeof(block);
6757 for (i = 0; i < num_seg; i++) {
6758 frag_strt = ntohs(frag->start);
6759 frag_end = ntohs(frag->end);
6760 /* some sanity checks on the fargment offsets */
6761 if (frag_strt > frag_end) {
6762 /* this one is malformed, skip */
6766 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
6768 *biggest_tsn_acked = frag_end + last_tsn;
6770 /* mark acked dgs and find out the highestTSN being acked */
6772 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6774 /* save the locations of the last frags */
6775 last_frag_high = frag_end + last_tsn;
6778 * now lets see if we need to reset the queue due to
6779 * a out-of-order SACK fragment
6781 if (compare_with_wrap(frag_strt + last_tsn,
6782 last_frag_high, MAX_TSN)) {
6784 * if the new frag starts after the last TSN
6785 * frag covered, we are ok and this one is
6786 * beyond the last one
6791 * ok, they have reset us, so we need to
6792 * reset the queue this will cause extra
6793 * hunting but hey, they chose the
6794 * performance hit when they failed to order
6797 tp1 = TAILQ_FIRST(&asoc->sent_queue);
6799 last_frag_high = frag_end + last_tsn;
6801 for (j = frag_strt; j <= frag_end; j++) {
6802 theTSN = j + last_tsn;
6804 if (tp1->rec.data.doing_fast_retransmit)
6808 * CMT: CUCv2 algorithm. For each TSN being
6809 * processed from the sent queue, track the
6810 * next expected pseudo-cumack, or
6811 * rtx_pseudo_cumack, if required. Separate
6812 * cumack trackers for first transmissions,
6813 * and retransmissions.
6815 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6816 (tp1->snd_count == 1)) {
6817 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
6818 tp1->whoTo->find_pseudo_cumack = 0;
6820 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6821 (tp1->snd_count > 1)) {
6822 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
6823 tp1->whoTo->find_rtx_pseudo_cumack = 0;
6825 if (tp1->rec.data.TSN_seq == theTSN) {
6826 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
6828 * must be held until
6832 * ECN Nonce: Add the nonce
6833 * value to the sender's
6836 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6838 * If it is less than RESEND, it is
6839 * now no-longer in flight.
6840 * Higher values may already be set
6841 * via previous Gap Ack Blocks...
6842 * i.e. ACKED or RESEND.
6844 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6845 *biggest_newly_acked_tsn, MAX_TSN)) {
6846 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
6855 * this_sack_highest_
6859 if (tp1->rec.data.chunk_was_revoked == 0)
6860 tp1->whoTo->saw_newack = 1;
6862 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6863 tp1->whoTo->this_sack_highest_newack,
6865 tp1->whoTo->this_sack_highest_newack =
6866 tp1->rec.data.TSN_seq;
6871 * this_sack_lowest_n
6874 if (*this_sack_lowest_newack == 0) {
6875 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6876 sctp_log_sack(*this_sack_lowest_newack,
6878 tp1->rec.data.TSN_seq,
6881 SCTP_LOG_TSN_ACKED);
6883 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
6888 * (rtx-)pseudo-cumac
6893 * (rtx-)pseudo-cumac
6895 * new_(rtx_)pseudo_c
6903 * (rtx-)pseudo-cumac
6911 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
6912 if (tp1->rec.data.chunk_was_revoked == 0) {
6913 tp1->whoTo->new_pseudo_cumack = 1;
6915 tp1->whoTo->find_pseudo_cumack = 1;
6917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6918 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6920 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
6921 if (tp1->rec.data.chunk_was_revoked == 0) {
6922 tp1->whoTo->new_pseudo_cumack = 1;
6924 tp1->whoTo->find_rtx_pseudo_cumack = 1;
6926 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6927 sctp_log_sack(*biggest_newly_acked_tsn,
6929 tp1->rec.data.TSN_seq,
6932 SCTP_LOG_TSN_ACKED);
6934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6935 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
6936 tp1->whoTo->flight_size,
6938 (uintptr_t) tp1->whoTo,
6939 tp1->rec.data.TSN_seq);
6941 sctp_flight_size_decrease(tp1);
6942 sctp_total_flight_decrease(stcb, tp1);
6944 tp1->whoTo->net_ack += tp1->send_size;
6945 if (tp1->snd_count < 2) {
6952 tp1->whoTo->net_ack2 += tp1->send_size;
6960 sctp_calculate_rto(stcb,
6963 &tp1->sent_rcv_time,
6964 sctp_align_safe_nocopy);
6969 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
6970 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
6971 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
6972 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6973 asoc->this_sack_highest_gap,
6975 asoc->this_sack_highest_gap =
6976 tp1->rec.data.TSN_seq;
6978 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6979 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6980 #ifdef SCTP_AUDITING_ENABLED
6981 sctp_audit_log(0xB2,
6982 (asoc->sent_queue_retran_cnt & 0x000000ff));
6987 * All chunks NOT UNSENT
6988 * fall through here and are
6991 tp1->sent = SCTP_DATAGRAM_MARKED;
6992 if (tp1->rec.data.chunk_was_revoked) {
6993 /* deflate the cwnd */
6994 tp1->whoTo->cwnd -= tp1->book_size;
6995 tp1->rec.data.chunk_was_revoked = 0;
6998 * EY - if all bit is set
7003 tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7005 * TAILQ_REMOVE(&asoc
7015 sctp_free_bufspace(stcb, asoc, tp1, 1);
7016 sctp_m_freem(tp1->data);
7020 * asoc->sent_queue_c
7024 * sctp_free_a_chunk(
7031 } /* if (tp1->TSN_seq == theTSN) */
7032 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
7036 tp1 = TAILQ_NEXT(tp1, sctp_next);
7037 } /* end while (tp1) */
7038 } /* end for (j = fragStart */
7039 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
7040 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
7041 *offset += sizeof(block);
7047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
7049 sctp_log_fr(*biggest_tsn_acked,
7050 *biggest_newly_acked_tsn,
7051 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
7054 * EY - if all bit is not set then there should be other loops to
7059 nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7060 sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7061 *offset += sizeof(nr_block);
7065 if (nr_frag == NULL) {
7069 last_nr_frag_high = 0;
7071 for (i = 0; i < num_nr_seg; i++) {
7073 nr_frag_strt = ntohs(nr_frag->start);
7074 nr_frag_end = ntohs(nr_frag->end);
7076 /* some sanity checks on the nr fargment offsets */
7077 if (nr_frag_strt > nr_frag_end) {
7078 /* this one is malformed, skip */
7083 * mark acked dgs and find out the highestTSN being
7087 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7089 /* save the locations of the last frags */
7090 last_nr_frag_high = nr_frag_end + last_tsn;
7093 * now lets see if we need to reset the
7094 * queue due to a out-of-order SACK fragment
7096 if (compare_with_wrap(nr_frag_strt + last_tsn,
7097 last_nr_frag_high, MAX_TSN)) {
7099 * if the new frag starts after the
7100 * last TSN frag covered, we are ok
7101 * and this one is beyond the last
7107 * ok, they have reset us, so we
7108 * need to reset the queue this will
7109 * cause extra hunting but hey, they
7110 * chose the performance hit when
7111 * they failed to order there gaps..
7113 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7115 last_nr_frag_high = nr_frag_end + last_tsn;
7118 for (j = nr_frag_strt + last_tsn; (compare_with_wrap((nr_frag_end + last_tsn), j, MAX_TSN)); j++) {
7120 if (tp1->rec.data.TSN_seq == j) {
7121 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7122 tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7124 * TAILQ_REMOVE(&asoc
7134 sctp_free_bufspace(stcb, asoc, tp1, 1);
7135 sctp_m_freem(tp1->data);
7139 * asoc->sent_queue_c
7143 * sctp_free_a_chunk(
7149 } /* if (tp1->TSN_seq == j) */
7150 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
7153 tp1 = TAILQ_NEXT(tp1, sctp_next);
7154 } /* end while (tp1) */
7156 } /* end for (j = nrFragStart */
7158 nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7159 sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7160 *offset += sizeof(nr_block);
7161 if (nr_frag == NULL) {
7164 } /* end of if(!all_bit) */
7167 * EY- wake up the socket if things have been removed from the sent
7170 if ((wake_him) && (stcb->sctp_socket)) {
7171 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7175 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7177 * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
7178 * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
7179 * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
7181 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7182 so = SCTP_INP_SO(stcb->sctp_ep);
7183 atomic_add_int(&stcb->asoc.refcnt, 1);
7184 SCTP_TCB_UNLOCK(stcb);
7185 SCTP_SOCKET_LOCK(so, 1);
7186 SCTP_TCB_LOCK(stcb);
7187 atomic_subtract_int(&stcb->asoc.refcnt, 1);
7188 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7189 /* assoc was freed while we were unlocked */
7190 SCTP_SOCKET_UNLOCK(so, 1);
7194 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7195 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7196 SCTP_SOCKET_UNLOCK(so, 1);
7199 * (SCTP_BASE_SYSCTL(sctp_logging_level) &
7200 * SCTP_WAKE_LOGGING_ENABLE) {
7201 * sctp_wakeup_log(stcb, cum_ack, wake_him,
7202 * SCTP_NOWAKE_FROM_SACK); } } */
7206 /* Identifies the non-renegable tsns that are revoked*/
/*
 * sctp_check_for_nr_revoked() — scan asoc->sent_queue for chunks whose TSN
 * lies above the new cumulative ack (cumack) and re-evaluate their
 * non-renegable (NR) ack state against this SACK:
 *
 *   - SCTP_DATAGRAM_NR_ACKED at this point means the TSN was NR-acked by a
 *     previous NR-SACK but was NOT covered again by this one, i.e. the peer
 *     revoked a non-renegable TSN.  Since NR-gap-acked data may already have
 *     been freed, that is a protocol violation; the in-line "EY TODO" notes
 *     the association should be aborted here, but no abort code is present.
 *
 *   - SCTP_DATAGRAM_NR_MARKED means the TSN WAS re-acked in this SACK
 *     (marked by the segment-processing pass), so it is promoted back to
 *     SCTP_DATAGRAM_NR_ACKED.
 *
 * The walk stops once TSNs beyond biggest_tsn_acked are reached (nothing
 * past the highest gap-acked TSN can have been revoked) and when an UNSENT
 * chunk is found (end of the in-flight region of the queue).
 *
 * NOTE(review): this excerpt is decimated — the function's return type, the
 * while-loop header, braces, and the break statements after the two
 * compare/UNSENT checks are not visible.  The control-flow description above
 * is inferred from the visible conditions and from the parallel
 * sctp_check_for_revoked() routine; confirm against the full source.
 */
7208 sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
7209     struct sctp_association *asoc, uint32_t cumack,
7210     u_long biggest_tsn_acked)
7212 	struct sctp_tmit_chunk *tp1;
7214 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
		/* Only TSNs strictly above the cum-ack can be gap-acked/revoked. */
7216 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
7219 			 * ok this guy is either ACK or MARKED. If it is
7220 			 * ACKED it has been previously acked but not this
7221 			 * time i.e. revoked. If it is MARKED it was ACK'ed
			/*
			 * Beyond the highest TSN acked in this SACK there is
			 * nothing left to examine (loop-exit branch not shown
			 * in this excerpt).
			 */
7224 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
7229 			if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
7231 				 * EY! a non-renegable TSN is revoked, need
7232 				 * to abort the association
7235 				 * EY TODO: put in the code to abort the
7239 			} else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
7240 				/* it has been re-acked in this SACK */
7241 				tp1->sent = SCTP_DATAGRAM_NR_ACKED;
		/* UNSENT marks the end of the sent portion of the queue. */
7244 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
7246 		tp1 = TAILQ_NEXT(tp1, sctp_next);
7250 /* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method*/
7252 sctp_handle_nr_sack(struct mbuf *m, int offset,
7253 struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
7254 struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
7256 struct sctp_association *asoc;
7259 struct sctp_nr_sack *nr_sack;
7260 struct sctp_tmit_chunk *tp1, *tp2;
7261 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
7262 this_sack_lowest_newack;
7263 uint32_t sav_cum_ack;
7266 uint16_t num_seg, num_nr_seg, num_dup;
7267 uint16_t wake_him = 0;
7268 unsigned int nr_sack_length;
7269 uint32_t send_s = 0;
7271 int accum_moved = 0;
7272 int will_exit_fast_recovery = 0;
7273 uint32_t a_rwnd, old_rwnd;
7274 int win_probe_recovery = 0;
7275 int win_probe_recovered = 0;
7276 struct sctp_nets *net = NULL;
7277 int nonce_sum_flag, ecn_seg_sums = 0, all_bit;
7279 uint8_t reneged_all = 0;
7280 uint8_t cmt_dac_flag;
7283 * we take any chance we can to service our queues since we cannot
7284 * get awoken when the socket is read from :<
7287 * Now perform the actual SACK handling: 1) Verify that it is not an
7288 * old sack, if so discard. 2) If there is nothing left in the send
7289 * queue (cum-ack is equal to last acked) then you have a duplicate
7290 * too, update any rwnd change and verify no timers are running.
7291 * then return. 3) Process any new consequtive data i.e. cum-ack
7292 * moved process these first and note that it moved. 4) Process any
7293 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
7294 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
7295 * sync up flightsizes and things, stop all timers and also check
7296 * for shutdown_pending state. If so then go ahead and send off the
7297 * shutdown. If in shutdown recv, send off the shutdown-ack and
7298 * start that timer, Ret. 9) Strike any non-acked things and do FR
7299 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
7300 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
7301 * if in shutdown_recv state.
7303 SCTP_TCB_LOCK_ASSERT(stcb);
7304 nr_sack = &ch->nr_sack;
7306 this_sack_lowest_newack = 0;
7308 nr_sack_length = (unsigned int)nr_sack_len;
7310 SCTP_STAT_INCR(sctps_slowpath_sack);
7311 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
7312 cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
7313 #ifdef SCTP_ASOCLOG_OF_TSNS
7314 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
7315 stcb->asoc.cumack_log_at++;
7316 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
7317 stcb->asoc.cumack_log_at = 0;
7320 all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
7321 num_seg = ntohs(nr_sack->num_gap_ack_blks);
7322 num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
7324 num_seg = num_nr_seg;
7327 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
7328 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
7329 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
7332 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
7333 num_dup = ntohs(nr_sack->num_dup_tsns);
7335 old_rwnd = stcb->asoc.peers_rwnd;
7336 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
7337 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
7338 stcb->asoc.overall_error_count,
7340 SCTP_FROM_SCTP_INDATA,
7343 stcb->asoc.overall_error_count = 0;
7345 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7346 sctp_log_sack(asoc->last_acked_seq,
7353 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
7354 int off_to_dup, iii;
7355 uint32_t *dupdata, dblock;
7357 /* EY! gotta be careful here */
7359 off_to_dup = (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) +
7360 sizeof(struct sctp_nr_sack_chunk);
7362 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
7363 (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
7365 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
7366 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7367 sizeof(uint32_t), (uint8_t *) & dblock);
7368 off_to_dup += sizeof(uint32_t);
7370 for (iii = 0; iii < num_dup; iii++) {
7371 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
7372 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7373 sizeof(uint32_t), (uint8_t *) & dblock);
7374 if (dupdata == NULL)
7376 off_to_dup += sizeof(uint32_t);
7380 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
7381 off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
7384 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7386 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
7387 tp1 = TAILQ_LAST(&asoc->sent_queue,
7388 sctpchunk_listhead);
7389 send_s = tp1->rec.data.TSN_seq + 1;
7391 send_s = asoc->sending_seq;
7393 if (cum_ack == send_s ||
7394 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
7401 panic("Impossible sack 1");
7406 * no way, we have not even sent this TSN out yet.
7407 * Peer is hopelessly messed up with us.
7412 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7413 0, M_DONTWAIT, 1, MT_DATA);
7415 struct sctp_paramhdr *ph;
7418 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7420 ph = mtod(oper, struct sctp_paramhdr *);
7421 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
7422 ph->param_length = htons(SCTP_BUF_LEN(oper));
7423 ippp = (uint32_t *) (ph + 1);
7424 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
7426 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
7427 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
7432 /**********************/
7433 /* 1) check the range */
7434 /**********************/
7435 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
7436 /* acking something behind */
7439 sav_cum_ack = asoc->last_acked_seq;
7441 /* update the Rwnd of the peer */
7442 if (TAILQ_EMPTY(&asoc->sent_queue) &&
7443 TAILQ_EMPTY(&asoc->send_queue) &&
7444 (asoc->stream_queue_cnt == 0)
7446 /* nothing left on send/sent and strmq */
7447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7448 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7449 asoc->peers_rwnd, 0, 0, a_rwnd);
7451 asoc->peers_rwnd = a_rwnd;
7452 if (asoc->sent_queue_retran_cnt) {
7453 asoc->sent_queue_retran_cnt = 0;
7455 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7456 /* SWS sender side engages */
7457 asoc->peers_rwnd = 0;
7459 /* stop any timers */
7460 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7461 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7462 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7463 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7464 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7465 SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
7466 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7467 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7470 net->partial_bytes_acked = 0;
7471 net->flight_size = 0;
7473 asoc->total_flight = 0;
7474 asoc->total_flight_count = 0;
7478 * We init netAckSz and netAckSz2 to 0. These are used to track 2
7479 * things. The total byte count acked is tracked in netAckSz AND
7480 * netAck2 is used to track the total bytes acked that are un-
7481 * amibguious and were never retransmitted. We track these on a per
7482 * destination address basis.
7484 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7485 net->prev_cwnd = net->cwnd;
7490 * CMT: Reset CUC and Fast recovery algo variables before
7493 net->new_pseudo_cumack = 0;
7494 net->will_exit_fast_recovery = 0;
7496 /* process the new consecutive TSN first */
7497 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7499 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
7501 last_tsn == tp1->rec.data.TSN_seq) {
7502 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7504 * ECN Nonce: Add the nonce to the sender's
7507 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
7509 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
7511 * If it is less than ACKED, it is
7512 * now no-longer in flight. Higher
7513 * values may occur during marking
7515 if ((tp1->whoTo->dest_state &
7516 SCTP_ADDR_UNCONFIRMED) &&
7517 (tp1->snd_count < 2)) {
7519 * If there was no retran
7520 * and the address is
7521 * un-confirmed and we sent
7523 * sacked.. its confirmed,
7526 tp1->whoTo->dest_state &=
7527 ~SCTP_ADDR_UNCONFIRMED;
7529 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
7530 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7531 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
7532 tp1->whoTo->flight_size,
7534 (uintptr_t) tp1->whoTo,
7535 tp1->rec.data.TSN_seq);
7537 sctp_flight_size_decrease(tp1);
7538 sctp_total_flight_decrease(stcb, tp1);
7540 tp1->whoTo->net_ack += tp1->send_size;
7542 /* CMT SFR and DAC algos */
7543 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7544 tp1->whoTo->saw_newack = 1;
7546 if (tp1->snd_count < 2) {
7548 * True non-retransmited
7551 tp1->whoTo->net_ack2 +=
7554 /* update RTO too? */
7557 sctp_calculate_rto(stcb,
7559 &tp1->sent_rcv_time,
7560 sctp_align_safe_nocopy);
7565 * CMT: CUCv2 algorithm. From the
7566 * cumack'd TSNs, for each TSN being
7567 * acked for the first time, set the
7568 * following variables for the
7569 * corresp destination.
7570 * new_pseudo_cumack will trigger a
7572 * find_(rtx_)pseudo_cumack will
7573 * trigger search for the next
7574 * expected (rtx-)pseudo-cumack.
7576 tp1->whoTo->new_pseudo_cumack = 1;
7577 tp1->whoTo->find_pseudo_cumack = 1;
7578 tp1->whoTo->find_rtx_pseudo_cumack = 1;
7581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7582 sctp_log_sack(asoc->last_acked_seq,
7584 tp1->rec.data.TSN_seq,
7587 SCTP_LOG_TSN_ACKED);
7589 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7590 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7593 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7594 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7595 #ifdef SCTP_AUDITING_ENABLED
7596 sctp_audit_log(0xB3,
7597 (asoc->sent_queue_retran_cnt & 0x000000ff));
7600 if (tp1->rec.data.chunk_was_revoked) {
7601 /* deflate the cwnd */
7602 tp1->whoTo->cwnd -= tp1->book_size;
7603 tp1->rec.data.chunk_was_revoked = 0;
7605 tp1->sent = SCTP_DATAGRAM_ACKED;
7610 tp1 = TAILQ_NEXT(tp1, sctp_next);
7612 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
7613 /* always set this up to cum-ack */
7614 asoc->this_sack_highest_gap = last_tsn;
7616 /* Move offset up to point to gaps/dups */
7617 offset += sizeof(struct sctp_nr_sack_chunk);
7618 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
7620 /* skip corrupt segments */
7626 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
7627 * to be greater than the cumack. Also reset saw_newack to 0
7630 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7631 net->saw_newack = 0;
7632 net->this_sack_highest_newack = last_tsn;
7636 * thisSackHighestGap will increase while handling NEW
7637 * segments this_sack_highest_newack will increase while
7638 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
7639 * used for CMT DAC algo. saw_newack will also change.
7642 sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
7643 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
7644 num_seg, num_nr_seg, &ecn_seg_sums);
7647 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7649 * validate the biggest_tsn_acked in the gap acks if
7650 * strict adherence is wanted.
7652 if ((biggest_tsn_acked == send_s) ||
7653 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
7655 * peer is either confused or we are under
7656 * attack. We must abort.
7663 /*******************************************/
7664 /* cancel ALL T3-send timer if accum moved */
7665 /*******************************************/
7666 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7667 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7668 if (net->new_pseudo_cumack)
7669 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7671 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
7676 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7677 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7678 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
7682 /********************************************/
7683 /* drop the acked chunks from the sendqueue */
7684 /********************************************/
7685 asoc->last_acked_seq = cum_ack;
7687 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7691 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
7695 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
7696 /* no more sent on list */
7697 printf("Warning, tp1->sent == %d and its now acked?\n",
7700 tp2 = TAILQ_NEXT(tp1, sctp_next);
7701 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
7702 if (tp1->pr_sctp_on) {
7703 if (asoc->pr_sctp_cnt != 0)
7704 asoc->pr_sctp_cnt--;
7706 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
7707 (asoc->total_flight > 0)) {
7709 panic("Warning flight size is postive and should be 0");
7711 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
7712 asoc->total_flight);
7714 asoc->total_flight = 0;
7717 /* sa_ignore NO_NULL_CHK */
7718 sctp_free_bufspace(stcb, asoc, tp1, 1);
7719 sctp_m_freem(tp1->data);
7720 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
7721 asoc->sent_queue_cnt_removeable--;
7724 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7725 sctp_log_sack(asoc->last_acked_seq,
7727 tp1->rec.data.TSN_seq,
7730 SCTP_LOG_FREE_SENT);
7733 asoc->sent_queue_cnt--;
7734 sctp_free_a_chunk(stcb, tp1);
7737 } while (tp1 != NULL);
7740 /* sa_ignore NO_NULL_CHK */
7741 if ((wake_him) && (stcb->sctp_socket)) {
7742 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7746 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7747 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7748 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7750 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7751 so = SCTP_INP_SO(stcb->sctp_ep);
7752 atomic_add_int(&stcb->asoc.refcnt, 1);
7753 SCTP_TCB_UNLOCK(stcb);
7754 SCTP_SOCKET_LOCK(so, 1);
7755 SCTP_TCB_LOCK(stcb);
7756 atomic_subtract_int(&stcb->asoc.refcnt, 1);
7757 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7758 /* assoc was freed while we were unlocked */
7759 SCTP_SOCKET_UNLOCK(so, 1);
7763 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7764 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7765 SCTP_SOCKET_UNLOCK(so, 1);
7768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7769 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
7773 if (asoc->fast_retran_loss_recovery && accum_moved) {
7774 if (compare_with_wrap(asoc->last_acked_seq,
7775 asoc->fast_recovery_tsn, MAX_TSN) ||
7776 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
7777 /* Setup so we will exit RFC2582 fast recovery */
7778 will_exit_fast_recovery = 1;
7782 * Check for revoked fragments:
7784 * if Previous sack - Had no frags then we can't have any revoked if
7785 * Previous sack - Had frag's then - If we now have frags aka
7786 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
7787 * some of them. else - The peer revoked all ACKED fragments, since
7788 * we had some before and now we have NONE.
7792 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7794 else if (asoc->saw_sack_with_frags) {
7795 int cnt_revoked = 0;
7797 tp1 = TAILQ_FIRST(&asoc->sent_queue);
7799 /* Peer revoked all dg's marked or acked */
7800 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7802 * EY- maybe check only if it is nr_acked
7803 * nr_marked may not be possible
7805 if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
7806 (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
7808 * EY! - TODO: Something previously
7809 * nr_gapped is reneged, abort the
7814 if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
7815 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
7816 tp1->sent = SCTP_DATAGRAM_SENT;
7817 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7818 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
7819 tp1->whoTo->flight_size,
7821 (uintptr_t) tp1->whoTo,
7822 tp1->rec.data.TSN_seq);
7824 sctp_flight_size_increase(tp1);
7825 sctp_total_flight_increase(stcb, tp1);
7826 tp1->rec.data.chunk_was_revoked = 1;
7828 * To ensure that this increase in
7829 * flightsize, which is artificial,
7830 * does not throttle the sender, we
7831 * also increase the cwnd
7834 tp1->whoTo->cwnd += tp1->book_size;
7842 asoc->saw_sack_with_frags = 0;
7845 asoc->saw_sack_with_frags = 1;
7847 asoc->saw_sack_with_frags = 0;
7849 /* EY! - not sure about if there should be an IF */
7851 sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7852 else if (asoc->saw_sack_with_nr_frags) {
7854 * EY!- TODO: all previously nr_gapped chunks have been
7855 * reneged abort the association
7857 asoc->saw_sack_with_nr_frags = 0;
7860 asoc->saw_sack_with_nr_frags = 1;
7862 asoc->saw_sack_with_nr_frags = 0;
7863 /* JRS - Use the congestion control given in the CC module */
7864 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
7866 if (TAILQ_EMPTY(&asoc->sent_queue)) {
7867 /* nothing left in-flight */
7868 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7869 /* stop all timers */
7870 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7871 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7872 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
7873 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7874 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
7877 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7878 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
7879 net->flight_size = 0;
7880 net->partial_bytes_acked = 0;
7882 asoc->total_flight = 0;
7883 asoc->total_flight_count = 0;
7885 /**********************************/
7886 /* Now what about shutdown issues */
7887 /**********************************/
7888 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
7889 /* nothing left on sendqueue.. consider done */
7890 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7891 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7892 asoc->peers_rwnd, 0, 0, a_rwnd);
7894 asoc->peers_rwnd = a_rwnd;
7895 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7896 /* SWS sender side engages */
7897 asoc->peers_rwnd = 0;
7900 if ((asoc->stream_queue_cnt == 1) &&
7901 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7902 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
7903 (asoc->locked_on_sending)
7905 struct sctp_stream_queue_pending *sp;
7908 * I may be in a state where we got all across.. but
7909 * cannot write more due to a shutdown... we abort
7910 * since the user did not indicate EOR in this case.
7912 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
7914 if ((sp) && (sp->length == 0)) {
7915 asoc->locked_on_sending = NULL;
7916 if (sp->msg_is_complete) {
7917 asoc->stream_queue_cnt--;
7919 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7920 asoc->stream_queue_cnt--;
7924 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
7925 (asoc->stream_queue_cnt == 0)) {
7926 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
7927 /* Need to abort here */
7933 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7934 0, M_DONTWAIT, 1, MT_DATA);
7936 struct sctp_paramhdr *ph;
7939 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7941 ph = mtod(oper, struct sctp_paramhdr *);
7942 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7943 ph->param_length = htons(SCTP_BUF_LEN(oper));
7944 ippp = (uint32_t *) (ph + 1);
7945 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
7947 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
7948 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
7951 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
7952 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
7953 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7955 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
7956 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7957 sctp_stop_timers_for_shutdown(stcb);
7958 sctp_send_shutdown(stcb,
7959 stcb->asoc.primary_destination);
7960 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
7961 stcb->sctp_ep, stcb, asoc->primary_destination);
7962 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
7963 stcb->sctp_ep, stcb, asoc->primary_destination);
7966 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
7967 (asoc->stream_queue_cnt == 0)) {
7968 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
7971 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7972 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
7973 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7974 sctp_send_shutdown_ack(stcb,
7975 stcb->asoc.primary_destination);
7977 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
7978 stcb->sctp_ep, stcb, asoc->primary_destination);
7983 * Now here we are going to recycle net_ack for a different use...
7986 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7991 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
7992 * to be done. Setting this_sack_lowest_newack to the cum_ack will
7993 * automatically ensure that.
7995 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
7996 this_sack_lowest_newack = cum_ack;
7999 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
8000 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
8002 /* JRS - Use the congestion control given in the CC module */
8003 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
8005 /******************************************************************
8006 * Here we do the stuff with ECN Nonce checking.
8007 * We basically check to see if the nonce sum flag was incorrect
8008 * or if resynchronization needs to be done. Also if we catch a
8009 * misbehaving receiver we give him the kick.
8010 ******************************************************************/
8012 if (asoc->ecn_nonce_allowed) {
8013 if (asoc->nonce_sum_check) {
8014 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
8015 if (asoc->nonce_wait_for_ecne == 0) {
8016 struct sctp_tmit_chunk *lchk;
8018 lchk = TAILQ_FIRST(&asoc->send_queue);
8019 asoc->nonce_wait_for_ecne = 1;
8021 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
8023 asoc->nonce_wait_tsn = asoc->sending_seq;
8026 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
8027 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
8029 * Misbehaving peer. We need
8030 * to react to this guy
8032 asoc->ecn_allowed = 0;
8033 asoc->ecn_nonce_allowed = 0;
8038 /* See if Resynchronization Possible */
8039 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
8040 asoc->nonce_sum_check = 1;
8042 * now we must calculate what the base is.
8043 * We do this based on two things, we know
8044 * the total's for all the segments
8045 * gap-acked in the SACK, its stored in
8046 * ecn_seg_sums. We also know the SACK's
8047 * nonce sum, its in nonce_sum_flag. So we
8048 * can build a truth table to back-calculate
8050 * asoc->nonce_sum_expect_base:
8052 * SACK-flag-Value Seg-Sums Base 0 0 0
8055 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
8059 /* Now are we exiting loss recovery ? */
8060 if (will_exit_fast_recovery) {
8061 /* Ok, we must exit fast recovery */
8062 asoc->fast_retran_loss_recovery = 0;
8064 if ((asoc->sat_t3_loss_recovery) &&
8065 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
8067 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
8068 /* end satellite t3 loss recovery */
8069 asoc->sat_t3_loss_recovery = 0;
8074 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8075 if (net->will_exit_fast_recovery) {
8076 /* Ok, we must exit fast recovery */
8077 net->fast_retran_loss_recovery = 0;
8081 /* Adjust and set the new rwnd value */
8082 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
8083 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
8084 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
8086 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
8087 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
8088 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8089 /* SWS sender side engages */
8090 asoc->peers_rwnd = 0;
8092 if (asoc->peers_rwnd > old_rwnd) {
8093 win_probe_recovery = 1;
8096 * Now we must setup so we have a timer up for anyone with
8102 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8103 if (win_probe_recovery && (net->window_probe)) {
8104 net->window_probe = 0;
8105 win_probe_recovered = 1;
8107 * Find first chunk that was used with
8108 * window probe and clear the event. Put
8109 * it back into the send queue as if has
8112 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8113 if (tp1->window_probe) {
8114 sctp_window_probe_recovery(stcb, asoc, net, tp1);
8119 if (net->flight_size) {
8121 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8122 stcb->sctp_ep, stcb, net);
8124 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8125 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8127 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
8129 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8130 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8131 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
8132 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
8133 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
8139 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
8140 (asoc->sent_queue_retran_cnt == 0) &&
8141 (win_probe_recovered == 0) &&
8143 /* huh, this should not happen */
8144 sctp_fs_audit(asoc);
8145 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8146 net->flight_size = 0;
8148 asoc->total_flight = 0;
8149 asoc->total_flight_count = 0;
8150 asoc->sent_queue_retran_cnt = 0;
8151 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8152 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
8153 sctp_flight_size_increase(tp1);
8154 sctp_total_flight_increase(stcb, tp1);
8155 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
8156 asoc->sent_queue_retran_cnt++;
8162 /*********************************************/
8163 /* Here we perform PR-SCTP procedures */
8165 /*********************************************/
8166 /* C1. update advancedPeerAckPoint */
8167 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
8168 asoc->advanced_peer_ack_point = cum_ack;
8170 /* C2. try to further move advancedPeerAckPoint ahead */
8171 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
8172 struct sctp_tmit_chunk *lchk;
8173 uint32_t old_adv_peer_ack_point;
8175 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
8176 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
8177 /* C3. See if we need to send a Fwd-TSN */
8178 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
8181 * ISSUE with ECN, see FWD-TSN processing for notes
8182 * on issues that will occur when the ECN NONCE
8183 * stuff is put into SCTP for cross checking.
8185 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
8187 send_forward_tsn(stcb, asoc);
8189 * ECN Nonce: Disable Nonce Sum check when
8190 * FWD TSN is sent and store resync tsn
8192 asoc->nonce_sum_check = 0;
8193 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
8197 /* Assure a timer is up */
8198 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8199 stcb->sctp_ep, stcb, lchk->whoTo);
8202 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
8203 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
8205 stcb->asoc.peers_rwnd,
8206 stcb->asoc.total_flight,
8207 stcb->asoc.total_output_queue_size);