2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Recompute the association's advertised receive window and cache it in
 * asoc->my_rwnd.  NOTE(review): this is a gappy extraction — the function's
 * enclosing braces are not visible in this view.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * Compute the receive window to advertise for this association:
 * either the full socket-buffer limit (when nothing is queued) or the
 * actual remaining socket-buffer space minus per-chunk queue overhead
 * and rwnd control overhead.  NOTE(review): several lines of this
 * function (return statements, else-arms, closing braces) are missing
 * from this extraction; the flow below is partial.
 */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
/* No socket: nothing to base the window on (early-out path; return not visible here). */
77 if (stcb->sctp_socket == NULL)
/* Nothing buffered anywhere for this association -> grant the full window. */
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
/* Subtract data held on the reassembly queue plus MSIZE mbuf overhead per chunk. */
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
/* Likewise for data still parked on the per-stream in-queues. */
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
/* Silly-window-syndrome avoidance: clamp tiny windows (body of this branch not visible). */
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and fully initialize a sctp_queued_to_read entry from the
 * per-DATA-chunk fields supplied by the caller (tsn/ppid/context/stream
 * info/flags) plus the mbuf chain `dm`.  Takes a reference on `net`
 * (whoFrom).  Returns the new entry, or — per the NULL check below —
 * presumably NULL on allocation failure (the early-return line is not
 * visible in this extraction).
 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
/* DATA chunk flags live in the upper byte of sinfo_flags. */
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
/* Hold a reference on the source net for the lifetime of this entry. */
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
/*
 * Same as sctp_build_readq_entry(), but sources every field from an
 * existing sctp_tmit_chunk on the reassembly path.  Takes over the
 * chunk's mbuf chain (chk->data) and references chk->whoTo.  Returns
 * the new entry; the early-return on allocation failure is not visible
 * in this extraction.
 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
/* DATA chunk flags live in the upper byte of sinfo_flags. */
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
/* Hold a reference on the chunk's net for the lifetime of this entry. */
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
/*
 * Build the ancillary-data (cmsg) mbuf for a received message, based on
 * which notification features the endpoint has enabled:
 *   SCTP_RCVINFO (RECVRCVINFO), SCTP_NXTINFO (RECVNXTINFO, only when the
 *   extended recv-info says a next message is available), and the legacy
 *   SCTP_SNDRCV / SCTP_EXTRCV (RECVDATAIOEVNT, optionally EXT_RCVINFO).
 * Returns the mbuf (`ret`) or — presumably — NULL when no ancillary data
 * is wanted or allocation fails; those return lines are not visible in
 * this extraction.
 */
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
/* Fast path: all three ancillary-data features are off. */
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
/* First pass: add up the CMSG_SPACE each enabled feature will need. */
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
244 ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
249 SCTP_BUF_LEN(ret) = 0;
251 /* We need a CMSG header followed by the struct */
252 cmh = mtod(ret, struct cmsghdr *);
254 * Make sure that there is no un-initialized padding between the
255 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: emit each enabled cmsg, advancing cmh by CMSG_SPACE each time. */
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
261 cmh->cmsg_type = SCTP_RCVINFO;
262 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
263 rcvinfo->rcv_sid = sinfo->sinfo_stream;
264 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
265 rcvinfo->rcv_flags = sinfo->sinfo_flags;
266 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
267 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
268 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
269 rcvinfo->rcv_context = sinfo->sinfo_context;
270 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
271 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
272 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO: translate the extended-recv "next message" hints into nxt_flags. */
275 cmh->cmsg_level = IPPROTO_SCTP;
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
277 cmh->cmsg_type = SCTP_NXTINFO;
278 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
279 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
280 nxtinfo->nxt_flags = 0;
281 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
282 nxtinfo->nxt_flags |= SCTP_UNORDERED;
284 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
285 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
287 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
288 nxtinfo->nxt_flags |= SCTP_COMPLETE;
290 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
291 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
292 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
293 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
294 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy sndrcv info: extended (SCTP_EXTRCV) or plain (SCTP_SNDRCV). */
296 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
297 cmh->cmsg_level = IPPROTO_SCTP;
298 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
301 cmh->cmsg_type = SCTP_EXTRCV;
302 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
305 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
306 cmh->cmsg_type = SCTP_SNDRCV;
308 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move `tsn` from the renegable mapping array to the non-renegable
 * (nr) mapping array once its data has been delivered to the user, so
 * a later drain/renege cannot take it back.  Also maintains
 * highest_tsn_inside_nr_map and, when the moved TSN was the highest in
 * the renegable map, walks backward to find the new highest.
 * No-op when sctp_do_drain is disabled (reneging can never happen).
 */
316 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
318 uint32_t gap, i, cumackp1;
/* Draining disabled: nothing can be revoked, so tracking is unnecessary. */
321 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
324 cumackp1 = asoc->cumulative_tsn + 1;
325 if (SCTP_TSN_GT(cumackp1, tsn)) {
327 * this tsn is behind the cum ack and thus we don't need to
328 * worry about it being moved from one to the other.
332 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
/* Invariant: a delivered TSN must be present in the mapping array. */
333 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
334 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
335 sctp_print_mapping_array(asoc);
337 panic("Things are really messed up now!!");
/* Flip the bit: set in nr map, clear in the renegable map. */
340 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
341 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
342 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
343 asoc->highest_tsn_inside_nr_map = tsn;
345 if (tsn == asoc->highest_tsn_inside_map) {
346 /* We must back down to see what the new highest is */
347 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
348 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
349 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
350 asoc->highest_tsn_inside_map = i;
/* Nothing left set in the map: mark it empty (base - 1). */
356 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
363 * We are delivering currently from the reassembly queue. We must continue to
364 * deliver until we either: 1) run out of space. 2) run out of sequential
365 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Drain consecutive fragments from asoc->reasmqueue into the socket
 * receive buffer while a partial delivery (PD-API) is in progress.
 * A FIRST fragment creates a new readq entry (control_pdapi); later
 * fragments are appended to it.  Stops on a TSN gap, an out-of-order
 * stream sequence, or after LAST_FRAG; on LAST_FRAG it also tries to
 * deliver any now-in-order messages queued on the same stream.
 * If the socket is gone, the whole reassembly queue is freed instead.
 * NOTE(review): many interior lines (braces, breaks, else-arms) are
 * missing from this extraction; comments below describe the visible
 * flow only.
 */
368 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
370 struct sctp_tmit_chunk *chk, *nchk;
375 struct sctp_queued_to_read *control, *ctl, *nctl;
380 cntDel = stream_no = 0;
/* Socket gone/closing: abandon delivery and purge the reassembly queue. */
381 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
382 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
383 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
384 /* socket above is long gone or going.. */
386 asoc->fragmented_delivery_inprogress = 0;
387 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
388 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
389 asoc->size_on_reasm_queue -= chk->send_size;
390 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
392 * Lose the data pointer, since its in the socket
396 sctp_m_freem(chk->data);
399 /* Now free the address and data */
400 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
401 /* sa_ignore FREED_MEMORY */
405 SCTP_TCB_LOCK_ASSERT(stcb);
/* Main delivery loop: only strictly consecutive TSNs can be delivered. */
406 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
407 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
408 /* Can't deliver more :< */
411 stream_no = chk->rec.data.stream_number;
412 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
413 if (nxt_todel != chk->rec.data.stream_seq &&
414 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
416 * Not the next sequence to deliver in its stream OR
/* FIRST fragment: start a new readq entry for this partial message. */
421 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
423 control = sctp_build_readq_entry_chk(stcb, chk);
424 if (control == NULL) {
428 /* save it off for our future deliveries */
429 stcb->asoc.control_pdapi = control;
430 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
434 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
435 sctp_add_to_readq(stcb->sctp_ep,
436 stcb, control, &stcb->sctp_socket->so_rcv, end,
437 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment: append this chunk's data to the in-progress entry. */
440 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
444 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
445 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
446 stcb->asoc.control_pdapi,
447 chk->data, end, chk->rec.data.TSN_seq,
448 &stcb->sctp_socket->so_rcv)) {
450 * something is very wrong, either
451 * control_pdapi is NULL, or the tail_mbuf
452 * is corrupt, or there is a EOM already on
455 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
/* Invariant-violation paths: panic on debug kernels, log otherwise. */
459 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
460 panic("This should not happen control_pdapi NULL?");
462 /* if we did not panic, it was a EOM */
463 panic("Bad chunking ??");
465 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
466 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
468 SCTP_PRINTF("Bad chunking ??\n");
469 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
477 /* pull it we did it */
478 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* LAST fragment delivered: end the PD-API session, bump the stream SSN. */
479 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
480 asoc->fragmented_delivery_inprogress = 0;
481 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
482 asoc->strmin[stream_no].last_sequence_delivered++;
484 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
485 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
487 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
489 * turn the flag back on since we just delivered
492 asoc->fragmented_delivery_inprogress = 1;
/* Remember what was just delivered, for later PD-API bookkeeping. */
494 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
495 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
496 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
497 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
499 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
500 asoc->size_on_reasm_queue -= chk->send_size;
501 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
502 /* free up the chk */
504 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Message complete: flush any now-deliverable ordered messages on the stream. */
506 if (asoc->fragmented_delivery_inprogress == 0) {
508 * Now lets see if we can deliver the next one on
511 struct sctp_stream_in *strm;
513 strm = &asoc->strmin[stream_no];
514 nxt_todel = strm->last_sequence_delivered + 1;
515 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
516 /* Deliver more if we can. */
517 if (nxt_todel == ctl->sinfo_ssn) {
518 TAILQ_REMOVE(&strm->inqueue, ctl, next);
519 asoc->size_on_all_streams -= ctl->length;
520 sctp_ucount_decr(asoc->cnt_on_all_streams);
521 strm->last_sequence_delivered++;
522 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
523 sctp_add_to_readq(stcb->sctp_ep, stcb,
525 &stcb->sctp_socket->so_rcv, 1,
526 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
530 nxt_todel = strm->last_sequence_delivered + 1;
538 * Queue the chunk either right into the socket buffer if it is the next one
539 * to go OR put it in the correct place in the delivery queue. If we do
540 * append to the so_buf, keep doing so until we are out of order. One big
541 * question still remains, what to do when the socket buffer is FULL??
/*
 * Place a complete (unfragmented) readq entry for an ordered stream:
 * deliver it immediately (plus any messages it unblocks) when its SSN
 * is the next expected, otherwise insert it SSN-sorted into the
 * stream's inqueue.  An SSN at or behind last_sequence_delivered is a
 * protocol violation and aborts the association (*abort_flag path).
 * Duplicate SSNs already queued are silently freed.
 * NOTE(review): this is a gappy extraction — some braces/else-arms and
 * the *abort_flag assignments are not visible here.
 */
544 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
545 struct sctp_queued_to_read *control, int *abort_flag)
548 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
549 * all the data in one stream this could happen quite rapidly. One
550 * could use the TSN to keep track of things, but this scheme breaks
551 * down in the other type of stream useage that could occur. Send a
552 * single msg to stream 0, send 4Billion messages to stream 1, now
553 * send a message to stream 0. You have a situation where the TSN
554 * has wrapped but not in the stream. Is this worth worrying about
555 * or should we just change our queue sort at the bottom to be by
558 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
559 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
560 * assignment this could happen... and I don't see how this would be
561 * a violation. So for now I am undecided an will leave the sort by
562 * SSN alone. Maybe a hybred approach is the answer
565 struct sctp_stream_in *strm;
566 struct sctp_queued_to_read *at;
/* Account for the entry up front; backed out again if delivered immediately. */
572 asoc->size_on_all_streams += control->length;
573 sctp_ucount_incr(asoc->cnt_on_all_streams);
574 strm = &asoc->strmin[control->sinfo_stream];
575 nxt_todel = strm->last_sequence_delivered + 1;
576 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
577 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
579 SCTPDBG(SCTP_DEBUG_INDATA1,
580 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
581 (uint32_t) control->sinfo_stream,
582 (uint32_t) strm->last_sequence_delivered,
583 (uint32_t) nxt_todel);
/* SSN at/behind what we already delivered: peer is misbehaving -> abort. */
584 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
585 /* The incoming sseq is behind where we last delivered? */
586 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
587 control->sinfo_ssn, strm->last_sequence_delivered);
590 * throw it in the stream so it gets cleaned up in
591 * association destruction
593 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Build a PROTOCOL_VIOLATION error cause carrying location, TSN, and sid/ssn. */
594 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
595 0, M_DONTWAIT, 1, MT_DATA);
597 struct sctp_paramhdr *ph;
600 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
601 (sizeof(uint32_t) * 3);
602 ph = mtod(oper, struct sctp_paramhdr *);
603 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
604 ph->param_length = htons(SCTP_BUF_LEN(oper));
605 ippp = (uint32_t *) (ph + 1);
606 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
608 *ippp = control->sinfo_tsn;
610 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
612 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
613 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/* Exactly the next expected SSN: deliver now, then drain unblocked followers. */
618 if (nxt_todel == control->sinfo_ssn) {
619 /* can be delivered right away? */
620 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
621 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
623 /* EY it wont be queued if it could be delivered directly */
625 asoc->size_on_all_streams -= control->length;
626 sctp_ucount_decr(asoc->cnt_on_all_streams);
627 strm->last_sequence_delivered++;
629 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
630 sctp_add_to_readq(stcb->sctp_ep, stcb,
632 &stcb->sctp_socket->so_rcv, 1,
633 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* Anything queued behind it that is now in order goes out too. */
634 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
636 nxt_todel = strm->last_sequence_delivered + 1;
637 if (nxt_todel == control->sinfo_ssn) {
638 TAILQ_REMOVE(&strm->inqueue, control, next);
639 asoc->size_on_all_streams -= control->length;
640 sctp_ucount_decr(asoc->cnt_on_all_streams);
641 strm->last_sequence_delivered++;
643 * We ignore the return of deliver_data here
644 * since we always can hold the chunk on the
645 * d-queue. And we have a finite number that
646 * can be delivered from the strq.
648 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
649 sctp_log_strm_del(control, NULL,
650 SCTP_STR_LOG_FROM_IMMED_DEL);
652 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
653 sctp_add_to_readq(stcb->sctp_ep, stcb,
655 &stcb->sctp_socket->so_rcv, 1,
656 SCTP_READ_LOCK_NOT_HELD,
/* Out of order: insert into the stream's inqueue, kept sorted by SSN. */
665 * Ok, we did not deliver this guy, find the correct place
666 * to put it on the queue.
668 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
671 if (TAILQ_EMPTY(&strm->inqueue)) {
673 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
674 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
676 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
678 TAILQ_FOREACH(at, &strm->inqueue, next) {
679 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
681 * one in queue is bigger than the
682 * new one, insert before this one
684 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
685 sctp_log_strm_del(control, at,
686 SCTP_STR_LOG_FROM_INSERT_MD);
688 TAILQ_INSERT_BEFORE(at, control, next);
/* Duplicate SSN already queued: drop the new copy (possible SSN wrap noted below). */
690 } else if (at->sinfo_ssn == control->sinfo_ssn) {
692 * Gak, He sent me a duplicate str
696 * foo bar, I guess I will just free
697 * this new guy, should we abort
698 * too? FIX ME MAYBE? Or it COULD be
699 * that the SSN's have wrapped.
700 * Maybe I should compare to TSN
701 * somehow... sigh for now just blow
706 sctp_m_freem(control->data);
707 control->data = NULL;
708 asoc->size_on_all_streams -= control->length;
709 sctp_ucount_decr(asoc->cnt_on_all_streams);
710 if (control->whoFrom) {
711 sctp_free_remote_addr(control->whoFrom);
712 control->whoFrom = NULL;
714 sctp_free_a_readq(stcb, control);
717 if (TAILQ_NEXT(at, next) == NULL) {
719 * We are at the end, insert
722 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
723 sctp_log_strm_del(control, at,
724 SCTP_STR_LOG_FROM_INSERT_TL);
726 TAILQ_INSERT_AFTER(&strm->inqueue,
737 * Returns two things: You get the total size of the deliverable parts of the
738 * first fragmented message on the reassembly queue. And you get a 1 back if
739 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Sum the sizes of the consecutive-TSN fragments of the first message
 * on the reassembly queue into *t_size; the return value (1 complete /
 * 0 incomplete, per the comment above) comes from return lines not
 * visible in this extraction.
 */
742 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
744 struct sctp_tmit_chunk *chk;
748 chk = TAILQ_FIRST(&asoc->reasmqueue);
750 /* nothing on the queue */
/* Head of queue must be a FIRST fragment for the message to be countable. */
753 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
754 /* Not a first on the queue */
757 tsn = chk->rec.data.TSN_seq;
/* Walk consecutive TSNs, accumulating sizes until a gap or LAST_FRAG.
 * NOTE(review): the tsn increment per iteration is not visible here. */
758 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
759 if (tsn != chk->rec.data.TSN_seq) {
762 *t_size += chk->send_size;
763 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Decide whether reassembly delivery can start (or continue) and kick
 * sctp_service_reassembly() accordingly.  If no partial delivery is in
 * progress, start one only when the head of the reassembly queue is a
 * deliverable FIRST fragment and either the whole message is present
 * or enough data has accumulated to reach the partial-delivery point.
 */
772 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
774 struct sctp_tmit_chunk *chk;
776 uint32_t tsize, pd_point;
779 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the accounting to a known-clean state. */
782 asoc->size_on_reasm_queue = 0;
783 asoc->cnt_on_reasm_queue = 0;
786 if (asoc->fragmented_delivery_inprogress == 0) {
788 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
/* Head must be a FIRST fragment that is next in stream order (or unordered). */
789 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
790 (nxt_todel == chk->rec.data.stream_seq ||
791 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
793 * Yep the first one is here and its ok to deliver
/* Partial-delivery threshold: socket limit caps the endpoint's pd point. */
796 if (stcb->sctp_socket) {
797 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
798 stcb->sctp_ep->partial_delivery_point);
800 pd_point = stcb->sctp_ep->partial_delivery_point;
802 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
805 * Yes, we setup to start reception, by
806 * backing down the TSN just in case we
807 * can't deliver. If we
/* Begin PD-API: record the message's identity so later fragments match. */
809 asoc->fragmented_delivery_inprogress = 1;
810 asoc->tsn_last_delivered =
811 chk->rec.data.TSN_seq - 1;
813 chk->rec.data.stream_number;
814 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
815 asoc->pdapi_ppid = chk->rec.data.payloadtype;
816 asoc->fragment_flags = chk->rec.data.rcv_flags;
817 sctp_service_reassembly(stcb, asoc);
822 * Service re-assembly will deliver stream data queued at
823 * the end of fragmented delivery.. but it wont know to go
824 * back and call itself again... we do that here with the
/* Delivery already in progress: service it, and re-check if it finished. */
827 sctp_service_reassembly(stcb, asoc);
828 if (asoc->fragmented_delivery_inprogress == 0) {
830 * finished our Fragmented delivery, could be more
839 * Dump onto the re-assembly queue, in its proper place. After dumping on the
840 * queue, see if anthing can be delivered. If so pull it off (or as much as
841 * we can. If we run out of space then we must dump what we can and set the
842 * appropriate flag to say we queued what we could.
845 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
846 struct sctp_tmit_chunk *chk, int *abort_flag)
849 uint32_t cum_ackp1, prev_tsn, post_tsn;
850 struct sctp_tmit_chunk *at, *prev, *next;
853 cum_ackp1 = asoc->tsn_last_delivered + 1;
854 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
855 /* This is the first one on the queue */
856 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
858 * we do not check for delivery of anything when only one
861 asoc->size_on_reasm_queue = chk->send_size;
862 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
863 if (chk->rec.data.TSN_seq == cum_ackp1) {
864 if (asoc->fragmented_delivery_inprogress == 0 &&
865 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
866 SCTP_DATA_FIRST_FRAG) {
868 * An empty queue, no delivery inprogress,
869 * we hit the next one and it does NOT have
870 * a FIRST fragment mark.
872 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
873 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
874 0, M_DONTWAIT, 1, MT_DATA);
877 struct sctp_paramhdr *ph;
881 sizeof(struct sctp_paramhdr) +
882 (sizeof(uint32_t) * 3);
883 ph = mtod(oper, struct sctp_paramhdr *);
885 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
886 ph->param_length = htons(SCTP_BUF_LEN(oper));
887 ippp = (uint32_t *) (ph + 1);
888 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
890 *ippp = chk->rec.data.TSN_seq;
892 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
895 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
896 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
898 } else if (asoc->fragmented_delivery_inprogress &&
899 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
901 * We are doing a partial delivery and the
902 * NEXT chunk MUST be either the LAST or
903 * MIDDLE fragment NOT a FIRST
905 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
906 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
907 0, M_DONTWAIT, 1, MT_DATA);
909 struct sctp_paramhdr *ph;
913 sizeof(struct sctp_paramhdr) +
914 (3 * sizeof(uint32_t));
915 ph = mtod(oper, struct sctp_paramhdr *);
917 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
918 ph->param_length = htons(SCTP_BUF_LEN(oper));
919 ippp = (uint32_t *) (ph + 1);
920 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
922 *ippp = chk->rec.data.TSN_seq;
924 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
926 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
927 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
929 } else if (asoc->fragmented_delivery_inprogress) {
931 * Here we are ok with a MIDDLE or LAST
934 if (chk->rec.data.stream_number !=
935 asoc->str_of_pdapi) {
936 /* Got to be the right STR No */
937 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
938 chk->rec.data.stream_number,
940 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
941 0, M_DONTWAIT, 1, MT_DATA);
943 struct sctp_paramhdr *ph;
947 sizeof(struct sctp_paramhdr) +
948 (sizeof(uint32_t) * 3);
950 struct sctp_paramhdr *);
952 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
954 htons(SCTP_BUF_LEN(oper));
955 ippp = (uint32_t *) (ph + 1);
956 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
958 *ippp = chk->rec.data.TSN_seq;
960 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
962 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
963 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
965 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
966 SCTP_DATA_UNORDERED &&
967 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
968 /* Got to be the right STR Seq */
969 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
970 chk->rec.data.stream_seq,
972 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
973 0, M_DONTWAIT, 1, MT_DATA);
975 struct sctp_paramhdr *ph;
979 sizeof(struct sctp_paramhdr) +
980 (3 * sizeof(uint32_t));
982 struct sctp_paramhdr *);
984 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
986 htons(SCTP_BUF_LEN(oper));
987 ippp = (uint32_t *) (ph + 1);
988 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
990 *ippp = chk->rec.data.TSN_seq;
992 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
995 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
996 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1003 /* Find its place */
1004 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1005 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1007 * one in queue is bigger than the new one, insert
1011 asoc->size_on_reasm_queue += chk->send_size;
1012 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1014 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1016 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1017 /* Gak, He sent me a duplicate str seq number */
1019 * foo bar, I guess I will just free this new guy,
1020 * should we abort too? FIX ME MAYBE? Or it COULD be
1021 * that the SSN's have wrapped. Maybe I should
1022 * compare to TSN somehow... sigh for now just blow
1026 sctp_m_freem(chk->data);
1029 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1033 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1035 * We are at the end, insert it after this
1038 /* check it first */
1039 asoc->size_on_reasm_queue += chk->send_size;
1040 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1041 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1046 /* Now the audits */
1048 prev_tsn = chk->rec.data.TSN_seq - 1;
1049 if (prev_tsn == prev->rec.data.TSN_seq) {
1051 * Ok the one I am dropping onto the end is the
1052 * NEXT. A bit of valdiation here.
1054 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1055 SCTP_DATA_FIRST_FRAG ||
1056 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1057 SCTP_DATA_MIDDLE_FRAG) {
1059 * Insert chk MUST be a MIDDLE or LAST
1062 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1063 SCTP_DATA_FIRST_FRAG) {
1064 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1065 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1066 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1067 0, M_DONTWAIT, 1, MT_DATA);
1069 struct sctp_paramhdr *ph;
1072 SCTP_BUF_LEN(oper) =
1073 sizeof(struct sctp_paramhdr) +
1074 (3 * sizeof(uint32_t));
1076 struct sctp_paramhdr *);
1078 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1080 htons(SCTP_BUF_LEN(oper));
1081 ippp = (uint32_t *) (ph + 1);
1082 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1084 *ippp = chk->rec.data.TSN_seq;
1086 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1089 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1090 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1094 if (chk->rec.data.stream_number !=
1095 prev->rec.data.stream_number) {
1097 * Huh, need the correct STR here,
1098 * they must be the same.
1100 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1101 chk->rec.data.stream_number,
1102 prev->rec.data.stream_number);
1103 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1104 0, M_DONTWAIT, 1, MT_DATA);
1106 struct sctp_paramhdr *ph;
1109 SCTP_BUF_LEN(oper) =
1110 sizeof(struct sctp_paramhdr) +
1111 (3 * sizeof(uint32_t));
1113 struct sctp_paramhdr *);
1115 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1117 htons(SCTP_BUF_LEN(oper));
1118 ippp = (uint32_t *) (ph + 1);
1119 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1121 *ippp = chk->rec.data.TSN_seq;
1123 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1125 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1126 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1130 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1131 chk->rec.data.stream_seq !=
1132 prev->rec.data.stream_seq) {
1134 * Huh, need the correct STR here,
1135 * they must be the same.
1137 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1138 chk->rec.data.stream_seq,
1139 prev->rec.data.stream_seq);
1140 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1141 0, M_DONTWAIT, 1, MT_DATA);
1143 struct sctp_paramhdr *ph;
1146 SCTP_BUF_LEN(oper) =
1147 sizeof(struct sctp_paramhdr) +
1148 (3 * sizeof(uint32_t));
1150 struct sctp_paramhdr *);
1152 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1154 htons(SCTP_BUF_LEN(oper));
1155 ippp = (uint32_t *) (ph + 1);
1156 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1158 *ippp = chk->rec.data.TSN_seq;
1160 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1162 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1163 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1167 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1168 SCTP_DATA_LAST_FRAG) {
1169 /* Insert chk MUST be a FIRST */
1170 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1171 SCTP_DATA_FIRST_FRAG) {
1172 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1173 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1174 0, M_DONTWAIT, 1, MT_DATA);
1176 struct sctp_paramhdr *ph;
1179 SCTP_BUF_LEN(oper) =
1180 sizeof(struct sctp_paramhdr) +
1181 (3 * sizeof(uint32_t));
1183 struct sctp_paramhdr *);
1185 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1187 htons(SCTP_BUF_LEN(oper));
1188 ippp = (uint32_t *) (ph + 1);
1189 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1191 *ippp = chk->rec.data.TSN_seq;
1193 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1196 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1197 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1205 post_tsn = chk->rec.data.TSN_seq + 1;
1206 if (post_tsn == next->rec.data.TSN_seq) {
1208 * Ok the one I am inserting ahead of is my NEXT
1209 * one. A bit of valdiation here.
1211 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1212 /* Insert chk MUST be a last fragment */
1213 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1214 != SCTP_DATA_LAST_FRAG) {
1215 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1216 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1217 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1218 0, M_DONTWAIT, 1, MT_DATA);
1220 struct sctp_paramhdr *ph;
1223 SCTP_BUF_LEN(oper) =
1224 sizeof(struct sctp_paramhdr) +
1225 (3 * sizeof(uint32_t));
1227 struct sctp_paramhdr *);
1229 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1231 htons(SCTP_BUF_LEN(oper));
1232 ippp = (uint32_t *) (ph + 1);
1233 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1235 *ippp = chk->rec.data.TSN_seq;
1237 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1239 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1240 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1244 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1245 SCTP_DATA_MIDDLE_FRAG ||
1246 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1247 SCTP_DATA_LAST_FRAG) {
1249 * Insert chk CAN be MIDDLE or FIRST NOT
1252 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1253 SCTP_DATA_LAST_FRAG) {
1254 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1255 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1256 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1257 0, M_DONTWAIT, 1, MT_DATA);
1259 struct sctp_paramhdr *ph;
1262 SCTP_BUF_LEN(oper) =
1263 sizeof(struct sctp_paramhdr) +
1264 (3 * sizeof(uint32_t));
1266 struct sctp_paramhdr *);
1268 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1270 htons(SCTP_BUF_LEN(oper));
1271 ippp = (uint32_t *) (ph + 1);
1272 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1274 *ippp = chk->rec.data.TSN_seq;
1276 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1279 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1280 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1284 if (chk->rec.data.stream_number !=
1285 next->rec.data.stream_number) {
1287 * Huh, need the correct STR here,
1288 * they must be the same.
1290 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1291 chk->rec.data.stream_number,
1292 next->rec.data.stream_number);
1293 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1294 0, M_DONTWAIT, 1, MT_DATA);
1296 struct sctp_paramhdr *ph;
1299 SCTP_BUF_LEN(oper) =
1300 sizeof(struct sctp_paramhdr) +
1301 (3 * sizeof(uint32_t));
1303 struct sctp_paramhdr *);
1305 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1307 htons(SCTP_BUF_LEN(oper));
1308 ippp = (uint32_t *) (ph + 1);
1309 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1311 *ippp = chk->rec.data.TSN_seq;
1313 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1316 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1317 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1321 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1322 chk->rec.data.stream_seq !=
1323 next->rec.data.stream_seq) {
1325 * Huh, need the correct STR here,
1326 * they must be the same.
1328 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1329 chk->rec.data.stream_seq,
1330 next->rec.data.stream_seq);
1331 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1332 0, M_DONTWAIT, 1, MT_DATA);
1334 struct sctp_paramhdr *ph;
1337 SCTP_BUF_LEN(oper) =
1338 sizeof(struct sctp_paramhdr) +
1339 (3 * sizeof(uint32_t));
1341 struct sctp_paramhdr *);
1343 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1345 htons(SCTP_BUF_LEN(oper));
1346 ippp = (uint32_t *) (ph + 1);
1347 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1349 *ippp = chk->rec.data.TSN_seq;
1351 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1353 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1354 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1361 /* Do we need to do some delivery? check */
1362 sctp_deliver_reasm_check(stcb, asoc);
1366 * This is an unfortunate routine. It checks to make sure an evil guy is not
1367 * stuffing us full of bad packet fragments. A broken peer could also do this,
1368 * but this is doubtful. It is too bad I must worry about evil crackers, sigh
1372 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1375 struct sctp_tmit_chunk *at;
1378 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1379 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1380 /* is it one bigger? */
1381 tsn_est = at->rec.data.TSN_seq + 1;
1382 if (tsn_est == TSN_seq) {
1383 /* yep. It better be a last then */
1384 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1385 SCTP_DATA_LAST_FRAG) {
1387 * Ok this guy belongs next to a guy
1388 * that is NOT last, it should be a
1389 * middle/last, not a complete
1395 * This guy is ok since its a LAST
1396 * and the new chunk is a fully
1397 * self- contained one.
1402 } else if (TSN_seq == at->rec.data.TSN_seq) {
1403 /* Software error since I have a dup? */
1407 * Ok, 'at' is larger than new chunk but does it
1408 * need to be right before it.
1410 tsn_est = TSN_seq + 1;
1411 if (tsn_est == at->rec.data.TSN_seq) {
1412 /* Yep, It better be a first */
1413 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1414 SCTP_DATA_FIRST_FRAG) {
1427 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1428 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1429 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1430 int *break_flag, int last_chunk)
1432 /* Process a data chunk */
1433 /* struct sctp_tmit_chunk *chk; */
1434 struct sctp_tmit_chunk *chk;
1438 int need_reasm_check = 0;
1439 uint16_t strmno, strmseq;
1441 struct sctp_queued_to_read *control;
1443 uint32_t protocol_id;
1444 uint8_t chunk_flags;
1445 struct sctp_stream_reset_list *liste;
1448 tsn = ntohl(ch->dp.tsn);
1449 chunk_flags = ch->ch.chunk_flags;
1450 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1451 asoc->send_sack = 1;
1453 protocol_id = ch->dp.protocol_id;
1454 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1456 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1461 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1462 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1463 /* It is a duplicate */
1464 SCTP_STAT_INCR(sctps_recvdupdata);
1465 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1466 /* Record a dup for the next outbound sack */
1467 asoc->dup_tsns[asoc->numduptsns] = tsn;
1470 asoc->send_sack = 1;
1473 /* Calculate the number of TSN's between the base and this TSN */
1474 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1475 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1476 /* Can't hold the bit in the mapping at max array, toss it */
1479 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1480 SCTP_TCB_LOCK_ASSERT(stcb);
1481 if (sctp_expand_mapping_array(asoc, gap)) {
1482 /* Can't expand, drop it */
1486 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1489 /* See if we have received this one already */
1490 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1491 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1492 SCTP_STAT_INCR(sctps_recvdupdata);
1493 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1494 /* Record a dup for the next outbound sack */
1495 asoc->dup_tsns[asoc->numduptsns] = tsn;
1498 asoc->send_sack = 1;
1502 * Check to see about the GONE flag, duplicates would cause a sack
1503 * to be sent up above
1505 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1506 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1507 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1510 * wait a minute, this guy is gone, there is no longer a
1511 * receiver. Send peer an ABORT!
1513 struct mbuf *op_err;
1515 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1516 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1521 * Now before going further we see if there is room. If NOT then we
1522 * MAY let one through only IF this TSN is the one we are waiting
1523 * for on a partial delivery API.
1526 /* now do the tests */
1527 if (((asoc->cnt_on_all_streams +
1528 asoc->cnt_on_reasm_queue +
1529 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1530 (((int)asoc->my_rwnd) <= 0)) {
1532 * When we have NO room in the rwnd we check to make sure
1533 * the reader is doing its job...
1535 if (stcb->sctp_socket->so_rcv.sb_cc) {
1536 /* some to read, wake-up */
1537 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1540 so = SCTP_INP_SO(stcb->sctp_ep);
1541 atomic_add_int(&stcb->asoc.refcnt, 1);
1542 SCTP_TCB_UNLOCK(stcb);
1543 SCTP_SOCKET_LOCK(so, 1);
1544 SCTP_TCB_LOCK(stcb);
1545 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1546 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1547 /* assoc was freed while we were unlocked */
1548 SCTP_SOCKET_UNLOCK(so, 1);
1552 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1553 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1554 SCTP_SOCKET_UNLOCK(so, 1);
1557 /* now is it in the mapping array of what we have accepted? */
1558 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1559 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1560 /* Nope not in the valid range dump it */
1561 sctp_set_rwnd(stcb, asoc);
1562 if ((asoc->cnt_on_all_streams +
1563 asoc->cnt_on_reasm_queue +
1564 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1565 SCTP_STAT_INCR(sctps_datadropchklmt);
1567 SCTP_STAT_INCR(sctps_datadroprwnd);
1573 strmno = ntohs(ch->dp.stream_id);
1574 if (strmno >= asoc->streamincnt) {
1575 struct sctp_paramhdr *phdr;
1578 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1579 0, M_DONTWAIT, 1, MT_DATA);
1581 /* add some space up front so prepend will work well */
1582 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1583 phdr = mtod(mb, struct sctp_paramhdr *);
1585 * Error causes are just param's and this one has
1586 * two back to back phdr, one with the error type
1587 * and size, the other with the streamid and a rsvd
1589 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1590 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1591 phdr->param_length =
1592 htons(sizeof(struct sctp_paramhdr) * 2);
1594 /* We insert the stream in the type field */
1595 phdr->param_type = ch->dp.stream_id;
1596 /* And set the length to 0 for the rsvd field */
1597 phdr->param_length = 0;
1598 sctp_queue_op_err(stcb, mb);
1600 SCTP_STAT_INCR(sctps_badsid);
1601 SCTP_TCB_LOCK_ASSERT(stcb);
1602 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1603 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1604 asoc->highest_tsn_inside_nr_map = tsn;
1606 if (tsn == (asoc->cumulative_tsn + 1)) {
1607 /* Update cum-ack */
1608 asoc->cumulative_tsn = tsn;
1613 * Before we continue lets validate that we are not being fooled by
1614 * an evil attacker. We can only have 4k chunks based on our TSN
1615 * spread allowed by the mapping array 512 * 8 bits, so there is no
1616 * way our stream sequence numbers could have wrapped. We of course
1617 * only validate the FIRST fragment so the bit must be set.
1619 strmseq = ntohs(ch->dp.stream_sequence);
1620 #ifdef SCTP_ASOCLOG_OF_TSNS
1621 SCTP_TCB_LOCK_ASSERT(stcb);
1622 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1623 asoc->tsn_in_at = 0;
1624 asoc->tsn_in_wrapped = 1;
1626 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1627 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1628 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1629 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1630 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1631 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1632 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1633 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1636 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1637 (TAILQ_EMPTY(&asoc->resetHead)) &&
1638 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1639 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1640 /* The incoming sseq is behind where we last delivered? */
1641 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1642 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1643 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1644 0, M_DONTWAIT, 1, MT_DATA);
1646 struct sctp_paramhdr *ph;
1649 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1650 (3 * sizeof(uint32_t));
1651 ph = mtod(oper, struct sctp_paramhdr *);
1652 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1653 ph->param_length = htons(SCTP_BUF_LEN(oper));
1654 ippp = (uint32_t *) (ph + 1);
1655 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1659 *ippp = ((strmno << 16) | strmseq);
1662 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1663 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1667 /************************************
1668 * From here down we may find ch-> invalid
1669 * so its a good idea NOT to use it.
1670 *************************************/
1672 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1673 if (last_chunk == 0) {
1674 dmbuf = SCTP_M_COPYM(*m,
1675 (offset + sizeof(struct sctp_data_chunk)),
1676 the_len, M_DONTWAIT);
1677 #ifdef SCTP_MBUF_LOGGING
1678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1681 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1682 if (SCTP_BUF_IS_EXTENDED(mat)) {
1683 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1689 /* We can steal the last chunk */
1693 /* lop off the top part */
1694 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1695 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1696 l_len = SCTP_BUF_LEN(dmbuf);
1699 * need to count up the size hopefully does not hit
1705 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1706 l_len += SCTP_BUF_LEN(lat);
1709 if (l_len > the_len) {
1710 /* Trim the end round bytes off too */
1711 m_adj(dmbuf, -(l_len - the_len));
1714 if (dmbuf == NULL) {
1715 SCTP_STAT_INCR(sctps_nomem);
1718 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1719 asoc->fragmented_delivery_inprogress == 0 &&
1720 TAILQ_EMPTY(&asoc->resetHead) &&
1722 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1723 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1724 /* Candidate for express delivery */
1726 * Its not fragmented, No PD-API is up, Nothing in the
1727 * delivery queue, Its un-ordered OR ordered and the next to
1728 * deliver AND nothing else is stuck on the stream queue,
1729 * And there is room for it in the socket buffer. Lets just
1730 * stuff it up the buffer....
1733 /* It would be nice to avoid this copy if we could :< */
1734 sctp_alloc_a_readq(stcb, control);
1735 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1740 if (control == NULL) {
1741 goto failed_express_del;
1743 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1744 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1745 asoc->highest_tsn_inside_nr_map = tsn;
1747 sctp_add_to_readq(stcb->sctp_ep, stcb,
1748 control, &stcb->sctp_socket->so_rcv,
1749 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1751 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1752 /* for ordered, bump what we delivered */
1753 asoc->strmin[strmno].last_sequence_delivered++;
1755 SCTP_STAT_INCR(sctps_recvexpress);
1756 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1757 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1758 SCTP_STR_LOG_FROM_EXPRS_DEL);
1762 goto finish_express_del;
1765 /* If we reach here this is a new chunk */
1768 /* Express for fragmented delivery? */
1769 if ((asoc->fragmented_delivery_inprogress) &&
1770 (stcb->asoc.control_pdapi) &&
1771 (asoc->str_of_pdapi == strmno) &&
1772 (asoc->ssn_of_pdapi == strmseq)
1774 control = stcb->asoc.control_pdapi;
1775 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1776 /* Can't be another first? */
1777 goto failed_pdapi_express_del;
1779 if (tsn == (control->sinfo_tsn + 1)) {
1780 /* Yep, we can add it on */
1783 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1786 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1788 &stcb->sctp_socket->so_rcv)) {
1789 SCTP_PRINTF("Append fails end:%d\n", end);
1790 goto failed_pdapi_express_del;
1792 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1793 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1794 asoc->highest_tsn_inside_nr_map = tsn;
1796 SCTP_STAT_INCR(sctps_recvexpressm);
1797 control->sinfo_tsn = tsn;
1798 asoc->tsn_last_delivered = tsn;
1799 asoc->fragment_flags = chunk_flags;
1800 asoc->tsn_of_pdapi_last_delivered = tsn;
1801 asoc->last_flags_delivered = chunk_flags;
1802 asoc->last_strm_seq_delivered = strmseq;
1803 asoc->last_strm_no_delivered = strmno;
1805 /* clean up the flags and such */
1806 asoc->fragmented_delivery_inprogress = 0;
1807 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1808 asoc->strmin[strmno].last_sequence_delivered++;
1810 stcb->asoc.control_pdapi = NULL;
1811 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1813 * There could be another message
1816 need_reasm_check = 1;
1820 goto finish_express_del;
1823 failed_pdapi_express_del:
1825 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1826 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1827 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1828 asoc->highest_tsn_inside_nr_map = tsn;
1831 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1832 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1833 asoc->highest_tsn_inside_map = tsn;
1836 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1837 sctp_alloc_a_chunk(stcb, chk);
1839 /* No memory so we drop the chunk */
1840 SCTP_STAT_INCR(sctps_nomem);
1841 if (last_chunk == 0) {
1842 /* we copied it, free the copy */
1843 sctp_m_freem(dmbuf);
1847 chk->rec.data.TSN_seq = tsn;
1848 chk->no_fr_allowed = 0;
1849 chk->rec.data.stream_seq = strmseq;
1850 chk->rec.data.stream_number = strmno;
1851 chk->rec.data.payloadtype = protocol_id;
1852 chk->rec.data.context = stcb->asoc.context;
1853 chk->rec.data.doing_fast_retransmit = 0;
1854 chk->rec.data.rcv_flags = chunk_flags;
1856 chk->send_size = the_len;
1858 atomic_add_int(&net->ref_count, 1);
1861 sctp_alloc_a_readq(stcb, control);
1862 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1867 if (control == NULL) {
1868 /* No memory so we drop the chunk */
1869 SCTP_STAT_INCR(sctps_nomem);
1870 if (last_chunk == 0) {
1871 /* we copied it, free the copy */
1872 sctp_m_freem(dmbuf);
1876 control->length = the_len;
1879 /* Mark it as received */
1880 /* Now queue it where it belongs */
1881 if (control != NULL) {
1882 /* First a sanity check */
1883 if (asoc->fragmented_delivery_inprogress) {
1885 * Ok, we have a fragmented delivery in progress if
1886 * this chunk is next to deliver OR belongs in our
1887 * view to the reassembly, the peer is evil or
1890 uint32_t estimate_tsn;
1892 estimate_tsn = asoc->tsn_last_delivered + 1;
1893 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1894 (estimate_tsn == control->sinfo_tsn)) {
1895 /* Evil/Broke peer */
1896 sctp_m_freem(control->data);
1897 control->data = NULL;
1898 if (control->whoFrom) {
1899 sctp_free_remote_addr(control->whoFrom);
1900 control->whoFrom = NULL;
1902 sctp_free_a_readq(stcb, control);
1903 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1904 0, M_DONTWAIT, 1, MT_DATA);
1906 struct sctp_paramhdr *ph;
1909 SCTP_BUF_LEN(oper) =
1910 sizeof(struct sctp_paramhdr) +
1911 (3 * sizeof(uint32_t));
1912 ph = mtod(oper, struct sctp_paramhdr *);
1914 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1915 ph->param_length = htons(SCTP_BUF_LEN(oper));
1916 ippp = (uint32_t *) (ph + 1);
1917 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1921 *ippp = ((strmno << 16) | strmseq);
1923 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1924 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1928 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1929 sctp_m_freem(control->data);
1930 control->data = NULL;
1931 if (control->whoFrom) {
1932 sctp_free_remote_addr(control->whoFrom);
1933 control->whoFrom = NULL;
1935 sctp_free_a_readq(stcb, control);
1937 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1938 0, M_DONTWAIT, 1, MT_DATA);
1940 struct sctp_paramhdr *ph;
1943 SCTP_BUF_LEN(oper) =
1944 sizeof(struct sctp_paramhdr) +
1945 (3 * sizeof(uint32_t));
1947 struct sctp_paramhdr *);
1949 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1951 htons(SCTP_BUF_LEN(oper));
1952 ippp = (uint32_t *) (ph + 1);
1953 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1957 *ippp = ((strmno << 16) | strmseq);
1959 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1960 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1966 /* No PDAPI running */
1967 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1969 * Reassembly queue is NOT empty validate
1970 * that this tsn does not need to be in
1971 * reasembly queue. If it does then our peer
1972 * is broken or evil.
1974 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1975 sctp_m_freem(control->data);
1976 control->data = NULL;
1977 if (control->whoFrom) {
1978 sctp_free_remote_addr(control->whoFrom);
1979 control->whoFrom = NULL;
1981 sctp_free_a_readq(stcb, control);
1982 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1983 0, M_DONTWAIT, 1, MT_DATA);
1985 struct sctp_paramhdr *ph;
1988 SCTP_BUF_LEN(oper) =
1989 sizeof(struct sctp_paramhdr) +
1990 (3 * sizeof(uint32_t));
1992 struct sctp_paramhdr *);
1994 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1996 htons(SCTP_BUF_LEN(oper));
1997 ippp = (uint32_t *) (ph + 1);
1998 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2002 *ippp = ((strmno << 16) | strmseq);
2004 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2005 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2011 /* ok, if we reach here we have passed the sanity checks */
2012 if (chunk_flags & SCTP_DATA_UNORDERED) {
2013 /* queue directly into socket buffer */
2014 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2015 sctp_add_to_readq(stcb->sctp_ep, stcb,
2017 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2020 * Special check for when streams are resetting. We
2021 * could be more smart about this and check the
2022 * actual stream to see if it is not being reset..
2023 * that way we would not create a HOLB when amongst
2024 * streams being reset and those not being reset.
2026 * We take complete messages that have a stream reset
2027 * intervening (aka the TSN is after where our
2028 * cum-ack needs to be) off and put them on a
2029 * pending_reply_queue. The reassembly ones we do
2030 * not have to worry about since they are all sorted
2031 * and proceessed by TSN order. It is only the
2032 * singletons I must worry about.
2034 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2035 SCTP_TSN_GT(tsn, liste->tsn)) {
2037 * yep its past where we need to reset... go
2038 * ahead and queue it.
2040 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2042 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2044 struct sctp_queued_to_read *ctlOn,
2046 unsigned char inserted = 0;
2048 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2049 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2053 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2058 if (inserted == 0) {
2060 * must be put at end, use
2061 * prevP (all setup from
2062 * loop) to setup nextP.
2064 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2068 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2075 /* Into the re-assembly queue */
2076 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2079 * the assoc is now gone and chk was put onto the
2080 * reasm queue, which has all been freed.
2087 if (tsn == (asoc->cumulative_tsn + 1)) {
2088 /* Update cum-ack */
2089 asoc->cumulative_tsn = tsn;
2095 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2097 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2099 SCTP_STAT_INCR(sctps_recvdata);
2100 /* Set it present please */
2101 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2102 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2104 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2105 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2106 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2108 /* check the special flag for stream resets */
2109 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2110 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2112 * we have finished working through the backlogged TSN's now
2113 * time to reset streams. 1: call reset function. 2: free
2114 * pending_reply space 3: distribute any chunks in
2115 * pending_reply_queue.
2117 struct sctp_queued_to_read *ctl, *nctl;
2119 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2120 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2121 SCTP_FREE(liste, SCTP_M_STRESET);
2122 /* sa_ignore FREED_MEMORY */
2123 liste = TAILQ_FIRST(&asoc->resetHead);
2124 if (TAILQ_EMPTY(&asoc->resetHead)) {
2125 /* All can be removed */
2126 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2127 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2128 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2134 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2135 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2139 * if ctl->sinfo_tsn is <= liste->tsn we can
2140 * process it which is the NOT of
2141 * ctl->sinfo_tsn > liste->tsn
2143 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2144 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2151 * Now service re-assembly to pick up anything that has been
2152 * held on reassembly queue?
2154 sctp_deliver_reasm_check(stcb, asoc);
2155 need_reasm_check = 0;
2157 if (need_reasm_check) {
2158 /* Another one waits ? */
2159 sctp_deliver_reasm_check(stcb, asoc);
2164 int8_t sctp_map_lookup_tab[256] = {
2165 0, 1, 0, 2, 0, 1, 0, 3,
2166 0, 1, 0, 2, 0, 1, 0, 4,
2167 0, 1, 0, 2, 0, 1, 0, 3,
2168 0, 1, 0, 2, 0, 1, 0, 5,
2169 0, 1, 0, 2, 0, 1, 0, 3,
2170 0, 1, 0, 2, 0, 1, 0, 4,
2171 0, 1, 0, 2, 0, 1, 0, 3,
2172 0, 1, 0, 2, 0, 1, 0, 6,
2173 0, 1, 0, 2, 0, 1, 0, 3,
2174 0, 1, 0, 2, 0, 1, 0, 4,
2175 0, 1, 0, 2, 0, 1, 0, 3,
2176 0, 1, 0, 2, 0, 1, 0, 5,
2177 0, 1, 0, 2, 0, 1, 0, 3,
2178 0, 1, 0, 2, 0, 1, 0, 4,
2179 0, 1, 0, 2, 0, 1, 0, 3,
2180 0, 1, 0, 2, 0, 1, 0, 7,
2181 0, 1, 0, 2, 0, 1, 0, 3,
2182 0, 1, 0, 2, 0, 1, 0, 4,
2183 0, 1, 0, 2, 0, 1, 0, 3,
2184 0, 1, 0, 2, 0, 1, 0, 5,
2185 0, 1, 0, 2, 0, 1, 0, 3,
2186 0, 1, 0, 2, 0, 1, 0, 4,
2187 0, 1, 0, 2, 0, 1, 0, 3,
2188 0, 1, 0, 2, 0, 1, 0, 6,
2189 0, 1, 0, 2, 0, 1, 0, 3,
2190 0, 1, 0, 2, 0, 1, 0, 4,
2191 0, 1, 0, 2, 0, 1, 0, 3,
2192 0, 1, 0, 2, 0, 1, 0, 5,
2193 0, 1, 0, 2, 0, 1, 0, 3,
2194 0, 1, 0, 2, 0, 1, 0, 4,
2195 0, 1, 0, 2, 0, 1, 0, 3,
2196 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() - recompute the cumulative-TSN point from
 * the OR of mapping_array and nr_mapping_array, then either clear both
 * arrays (everything up to the highest TSN is accounted for) or slide
 * them down so mapping_array_base_tsn tracks the new cum-ack.
 *
 * NOTE(review): this extract has lines dropped (the embedded decimal
 * line numbers skip; the return type, opening brace, the
 * "asoc = &stcb->asoc;" initialization, declarations of at/val/clr/i/ii,
 * and several closing braces are not visible).  Code below is left
 * byte-identical to the extract; comments only were added.
 */
2201 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2204 * Now we also need to check the mapping array in a couple of ways.
2205 * 1) Did we move the cum-ack point?
2207 * When you first glance at this you might think that all entries that
2208 * make up the postion of the cum-ack would be in the nr-mapping
2209 * array only.. i.e. things up to the cum-ack are always
2210 * deliverable. Thats true with one exception, when its a fragmented
2211 * message we may not deliver the data until some threshold (or all
2212 * of it) is in place. So we must OR the nr_mapping_array and
2213 * mapping_array to get a true picture of the cum-ack.
2215 struct sctp_association *asoc;
2218 int slide_from, slide_end, lgap, distance;
2219 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for SCTP_MAP_PREPARE_SLIDE logging below. */
2223 old_cumack = asoc->cumulative_tsn;
2224 old_base = asoc->mapping_array_base_tsn;
2225 old_highest = asoc->highest_tsn_inside_map;
2227 * We could probably improve this a small bit by calculating the
2228 * offset of the current cum-ack as the starting point.
/*
 * Scan for the first byte of the combined map that is not all ones;
 * sctp_map_lookup_tab[] then adds the trailing set bits of that byte
 * so "at" holds the bit-count of the contiguous received prefix.
 */
2231 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2232 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2236 /* there is a 0 bit */
2237 at += sctp_map_lookup_tab[val];
2241 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: the cum-ack can never exceed both highest-TSN trackers. */
2243 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2244 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2246 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2247 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2249 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2250 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2251 sctp_print_mapping_array(asoc);
2252 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2253 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2255 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2256 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2259 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2260 highest_tsn = asoc->highest_tsn_inside_nr_map;
2262 highest_tsn = asoc->highest_tsn_inside_map;
2264 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2265 /* The complete array was completed by a single FR */
2266 /* highest becomes the cum-ack */
2274 /* clear the array */
2275 clr = ((at + 7) >> 3);
2276 if (clr > asoc->mapping_array_size) {
2277 clr = asoc->mapping_array_size;
2279 memset(asoc->mapping_array, 0, clr);
2280 memset(asoc->nr_mapping_array, 0, clr);
/* Debug pass: both arrays must be all-zero after the clear. */
2282 for (i = 0; i < asoc->mapping_array_size; i++) {
2283 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2284 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2285 sctp_print_mapping_array(asoc);
2289 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2290 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2291 } else if (at >= 8) {
2292 /* we can slide the mapping array down */
2293 /* slide_from holds where we hit the first NON 0xff byte */
2296 * now calculate the ceiling of the move using our highest
2299 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2300 slide_end = (lgap >> 3);
2301 if (slide_end < slide_from) {
2302 sctp_print_mapping_array(asoc);
2304 panic("impossible slide");
2306 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2307 lgap, slide_end, slide_from, at);
2311 if (slide_end > asoc->mapping_array_size) {
2313 panic("would overrun buffer");
2315 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2316 asoc->mapping_array_size, slide_end);
2317 slide_end = asoc->mapping_array_size;
2320 distance = (slide_end - slide_from) + 1;
2321 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2322 sctp_log_map(old_base, old_cumack, old_highest,
2323 SCTP_MAP_PREPARE_SLIDE);
2324 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2325 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2327 if (distance + slide_from > asoc->mapping_array_size ||
2330 * Here we do NOT slide forward the array so that
2331 * hopefully when more data comes in to fill it up
2332 * we will be able to slide it forward. Really I
2333 * don't think this should happen :-0
2336 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2337 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2338 (uint32_t) asoc->mapping_array_size,
2339 SCTP_MAP_SLIDE_NONE);
/* Copy the live window down to the front of both arrays... */
2344 for (ii = 0; ii < distance; ii++) {
2345 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2346 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
/* ...and zero the tail vacated by the slide. */
2349 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2350 asoc->mapping_array[ii] = 0;
2351 asoc->nr_mapping_array[ii] = 0;
2353 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2354 asoc->highest_tsn_inside_map += (slide_from << 3);
2356 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2357 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2359 asoc->mapping_array_base_tsn += (slide_from << 3);
2360 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2361 sctp_log_map(asoc->mapping_array_base_tsn,
2362 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2363 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() - decide whether to send a SACK immediately or
 * (re)start the delayed-ack timer.  In SHUTDOWN-SENT state the SACK
 * timer is stopped and a SHUTDOWN plus SACK are sent right away.
 * Otherwise a SACK is sent now when any of the immediate-ack
 * conditions hold (send_sack flag, a gap closed, duplicate TSNs seen,
 * a gap still open, delayed ack disabled, or the packet-count limit
 * reached); with CMT-DAC enabled some of these are instead delayed.
 *
 * NOTE(review): extract has dropped lines (embedded line numbers skip;
 * opening brace, "asoc = &stcb->asoc;", the is_a_gap declaration, a
 * "return;" in the SHUTDOWN-SENT arm and several braces are missing).
 * Code left byte-identical; comments only were added.
 */
2370 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2372 struct sctp_association *asoc;
2373 uint32_t highest_tsn;
/* Highest TSN seen is the max of the renegable and non-renegable maps. */
2376 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2377 highest_tsn = asoc->highest_tsn_inside_nr_map;
2379 highest_tsn = asoc->highest_tsn_inside_map;
2383 * Now we need to see if we need to queue a sack or just start the
2384 * timer (if allowed).
2386 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2388 * Ok special case, in SHUTDOWN-SENT case. here we maker
2389 * sure SACK timer is off and instead send a SHUTDOWN and a
2392 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2393 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2394 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2396 sctp_send_shutdown(stcb,
2397 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2398 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2402 /* is there a gap now ? */
2403 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2406 * CMT DAC algorithm: increase number of packets received
2409 stcb->asoc.cmt_dac_pkts_rcvd++;
2411 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2413 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2415 (stcb->asoc.numduptsns) || /* we have dup's */
2416 (is_a_gap) || /* is still a gap */
2417 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2418 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2421 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2422 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2423 (stcb->asoc.send_sack == 0) &&
2424 (stcb->asoc.numduptsns == 0) &&
2425 (stcb->asoc.delayed_ack) &&
2426 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2429 * CMT DAC algorithm: With CMT, delay acks
2430 * even in the face of
2432 * reordering. Therefore, if acks that do not
2433 * have to be sent because of the above
2434 * reasons, will be delayed. That is, acks
2435 * that would have been sent due to gap
2436 * reports will be delayed with DAC. Start
2437 * the delayed ack timer.
2439 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2440 stcb->sctp_ep, stcb, NULL);
2443 * Ok we must build a SACK since the timer
2444 * is pending, we got our first packet OR
2445 * there are gaps or duplicates.
2447 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2448 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* None of the immediate conditions held: just arm the delayed-ack timer. */
2451 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2452 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2453 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() - drive delivery from the reassembly queue.
 * If a partial-delivery (PD-API) is already in progress, service it
 * and stop if it is still incomplete.  Otherwise, look at the head of
 * the reassembly queue: when the first fragment of a deliverable
 * message is present (in stream-sequence order, or unordered) and
 * either the whole message is queued or its size reaches the
 * partial-delivery point, start a new fragmented delivery.
 *
 * NOTE(review): extract has dropped lines (embedded line numbers skip;
 * opening brace, the nxt_todel declaration, returns and several braces
 * are missing).  Code left byte-identical; comments only were added.
 */
2460 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2462 struct sctp_tmit_chunk *chk;
2463 uint32_t tsize, pd_point;
2466 if (asoc->fragmented_delivery_inprogress) {
2467 sctp_service_reassembly(stcb, asoc);
2469 /* Can we proceed further, i.e. the PD-API is complete */
2470 if (asoc->fragmented_delivery_inprogress) {
2475 * Now is there some other chunk I can deliver from the reassembly
2479 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Presumably the empty-queue arm: reset the reassembly accounting. */
2481 asoc->size_on_reasm_queue = 0;
2482 asoc->cnt_on_reasm_queue = 0;
2485 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2486 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2487 ((nxt_todel == chk->rec.data.stream_seq) ||
2488 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2490 * Yep the first one is here. We setup to start reception,
2491 * by backing down the TSN just in case we can't deliver.
2495 * Before we start though either all of the message should
2496 * be here or the socket buffer max or nothing on the
2497 * delivery queue and something can be delivered.
2499 if (stcb->sctp_socket) {
2500 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2501 stcb->sctp_ep->partial_delivery_point);
2503 pd_point = stcb->sctp_ep->partial_delivery_point;
2505 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Record the PD-API bookkeeping before servicing the queue. */
2506 asoc->fragmented_delivery_inprogress = 1;
2507 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2508 asoc->str_of_pdapi = chk->rec.data.stream_number;
2509 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2510 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2511 asoc->fragment_flags = chk->rec.data.rcv_flags;
2512 sctp_service_reassembly(stcb, asoc);
2513 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() - walk every chunk in an inbound packet's DATA
 * region.  DATA chunks are length-validated (aborting the association
 * on a malformed one) and handed to sctp_process_a_data_chunk();
 * control-chunk types found here are ignored (or abort, under the
 * strict_data_order sysctl); unknown types follow the high-bit rules
 * (0x40: queue an unrecognized-chunk error report; 0x80 clear: stop
 * processing the packet).  Afterwards it reports rwnd-overrun drops,
 * updates liveness/auto-close bookkeeping, services the reassembly
 * queue, and calls sctp_sack_check().
 *
 * NOTE(review): extract has dropped lines (embedded line numbers skip;
 * the opening brace, declarations of m/to/from/merr/stop_proc, the
 * "asoc = &stcb->asoc;" initialization, several else arms, returns and
 * braces are missing).  Code left byte-identical; comments only added.
 */
2521 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2522 struct sockaddr *src, struct sockaddr *dst,
2523 struct sctphdr *sh, struct sctp_inpcb *inp,
2524 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2525 uint8_t use_mflowid, uint32_t mflowid,
2526 uint32_t vrf_id, uint16_t port)
2528 struct sctp_data_chunk *ch, chunk_buf;
2529 struct sctp_association *asoc;
2530 int num_chunks = 0; /* number of control chunks processed */
2532 int chk_length, break_flag, last_chunk;
2533 int abort_flag = 0, was_a_gap;
2535 uint32_t highest_tsn;
2538 sctp_set_rwnd(stcb, &stcb->asoc);
2541 SCTP_TCB_LOCK_ASSERT(stcb);
2543 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2544 highest_tsn = asoc->highest_tsn_inside_nr_map;
2546 highest_tsn = asoc->highest_tsn_inside_map;
2548 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2550 * setup where we got the last DATA packet from for any SACK that
2551 * may need to go out. Don't bump the net. This is done ONLY when a
2552 * chunk is assigned.
2554 asoc->last_data_chunk_from = net;
2557 * Now before we proceed we must figure out if this is a wasted
2558 * cluster... i.e. it is a small packet sent in and yet the driver
2559 * underneath allocated a full cluster for it. If so we must copy it
2560 * to a smaller mbuf and free up the cluster mbuf. This will help
2561 * with cluster starvation. Note for __Panda__ we don't do this
2562 * since it has clusters all the way down to 64 bytes.
2564 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2565 /* we only handle mbufs that are singletons.. not chains */
2566 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2568 /* ok lets see if we can copy the data up */
2571 /* get the pointers and copy */
2572 to = mtod(m, caddr_t *);
2573 from = mtod((*mm), caddr_t *);
2574 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2575 /* copy the length and free up the old */
2576 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2578 /* sucess, back copy */
2581 /* We are in trouble in the mbuf world .. yikes */
2585 /* get pointer to the first chunk header */
2586 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2587 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2592 * process all DATA chunks...
2594 *high_tsn = asoc->cumulative_tsn;
2596 asoc->data_pkts_seen++;
2597 while (stop_proc == 0) {
2598 /* validate chunk length */
2599 chk_length = ntohs(ch->ch.chunk_length);
2600 if (length - *offset < chk_length) {
2601 /* all done, mutulated chunk */
2605 if (ch->ch.chunk_type == SCTP_DATA) {
2606 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2608 * Need to send an abort since we had a
2609 * invalid data chunk.
2611 struct mbuf *op_err;
/* Build a PROTOCOL_VIOLATION cause carrying the location and cum-TSN. */
2613 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2614 0, M_DONTWAIT, 1, MT_DATA);
2617 struct sctp_paramhdr *ph;
2620 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2621 (2 * sizeof(uint32_t));
2622 ph = mtod(op_err, struct sctp_paramhdr *);
2624 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2625 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2626 ippp = (uint32_t *) (ph + 1);
2627 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2629 *ippp = asoc->cumulative_tsn;
2632 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2633 sctp_abort_association(inp, stcb, m, iphlen,
2634 src, dst, sh, op_err,
2635 use_mflowid, mflowid,
2639 #ifdef SCTP_AUDITING_ENABLED
2640 sctp_audit_log(0xB1, 0);
2642 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2647 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2648 chk_length, net, high_tsn, &abort_flag, &break_flag,
2657 * Set because of out of rwnd space and no
2658 * drop rep space left.
2664 /* not a data chunk in the data region */
2665 switch (ch->ch.chunk_type) {
2666 case SCTP_INITIATION:
2667 case SCTP_INITIATION_ACK:
2668 case SCTP_SELECTIVE_ACK:
2669 case SCTP_NR_SELECTIVE_ACK:
2670 case SCTP_HEARTBEAT_REQUEST:
2671 case SCTP_HEARTBEAT_ACK:
2672 case SCTP_ABORT_ASSOCIATION:
2674 case SCTP_SHUTDOWN_ACK:
2675 case SCTP_OPERATION_ERROR:
2676 case SCTP_COOKIE_ECHO:
2677 case SCTP_COOKIE_ACK:
2680 case SCTP_SHUTDOWN_COMPLETE:
2681 case SCTP_AUTHENTICATION:
2682 case SCTP_ASCONF_ACK:
2683 case SCTP_PACKET_DROPPED:
2684 case SCTP_STREAM_RESET:
2685 case SCTP_FORWARD_CUM_TSN:
2688 * Now, what do we do with KNOWN chunks that
2689 * are NOT in the right place?
2691 * For now, I do nothing but ignore them. We
2692 * may later want to add sysctl stuff to
2693 * switch out and do either an ABORT() or
2694 * possibly process them.
2696 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2697 struct mbuf *op_err;
2699 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2700 sctp_abort_association(inp, stcb,
2704 use_mflowid, mflowid,
2710 /* unknown chunk type, use bit rules */
2711 if (ch->ch.chunk_type & 0x40) {
2712 /* Add a error report to the queue */
2714 struct sctp_paramhdr *phd;
2716 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2718 phd = mtod(merr, struct sctp_paramhdr *);
2720 * We cheat and use param
2721 * type since we did not
2722 * bother to define a error
2723 * cause struct. They are
2724 * the same basic format
2725 * with different names.
2728 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2730 htons(chk_length + sizeof(*phd));
2731 SCTP_BUF_LEN(merr) = sizeof(*phd);
2732 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2733 if (SCTP_BUF_NEXT(merr)) {
2734 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2737 sctp_queue_op_err(stcb, merr);
2744 if ((ch->ch.chunk_type & 0x80) == 0) {
2745 /* discard the rest of this packet */
2747 } /* else skip this bad chunk and
2750 } /* switch of chunk type */
/* Advance past the (padded) chunk and fetch the next header, if any. */
2752 *offset += SCTP_SIZE32(chk_length);
2753 if ((*offset >= length) || stop_proc) {
2754 /* no more data left in the mbuf chain */
2758 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2759 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2768 * we need to report rwnd overrun drops.
2770 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2774 * Did we get data, if so update the time for auto-close and
2775 * give peer credit for being alive.
2777 SCTP_STAT_INCR(sctps_recvpktwithdata);
2778 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2779 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2780 stcb->asoc.overall_error_count,
2782 SCTP_FROM_SCTP_INDATA,
2785 stcb->asoc.overall_error_count = 0;
2786 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2788 /* now service all of the reassm queue if needed */
2789 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2790 sctp_service_queues(stcb, asoc);
2792 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2793 /* Assure that we ack right away */
2794 stcb->asoc.send_sack = 1;
2796 /* Start a sack timer or QUEUE a SACK for sending */
2797 sctp_sack_check(stcb, was_a_gap);
/*
 * sctp_process_segment_range() - mark the sent-queue chunks covered by
 * one gap-ack block [last_tsn + frag_strt, last_tsn + frag_end].
 * Maintains the CMT CUCv2 (pseudo-cumack), SFR/HTNA (saw_newack,
 * this_sack_highest_newack) and DAC (this_sack_lowest_newack)
 * trackers, decreases flight size, updates RTO when allowed, marks
 * chunks SCTP_DATAGRAM_MARKED (or NR_ACKED with the data freed, for
 * nr-sacking), and returns wake_him (used only for nr-sack).
 * The sent-queue cursor is carried across calls via *p_tp1.
 *
 * NOTE(review): extract has dropped lines (embedded line numbers skip;
 * several conditions, else arms, the rto_ok handling and many braces
 * are missing).  Code left byte-identical; comments only were added.
 */
2802 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2803 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2805 uint32_t * biggest_newly_acked_tsn,
2806 uint32_t * this_sack_lowest_newack,
2809 struct sctp_tmit_chunk *tp1;
2810 unsigned int theTSN;
2811 int j, wake_him = 0, circled = 0;
2813 /* Recover the tp1 we last saw */
2816 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2818 for (j = frag_strt; j <= frag_end; j++) {
2819 theTSN = j + last_tsn;
2821 if (tp1->rec.data.doing_fast_retransmit)
2825 * CMT: CUCv2 algorithm. For each TSN being
2826 * processed from the sent queue, track the
2827 * next expected pseudo-cumack, or
2828 * rtx_pseudo_cumack, if required. Separate
2829 * cumack trackers for first transmissions,
2830 * and retransmissions.
2832 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2833 (tp1->snd_count == 1)) {
2834 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2835 tp1->whoTo->find_pseudo_cumack = 0;
2837 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2838 (tp1->snd_count > 1)) {
2839 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2840 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2842 if (tp1->rec.data.TSN_seq == theTSN) {
2843 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2845 * must be held until
2848 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2850 * If it is less than RESEND, it is
2851 * now no-longer in flight.
2852 * Higher values may already be set
2853 * via previous Gap Ack Blocks...
2854 * i.e. ACKED or RESEND.
2856 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2857 *biggest_newly_acked_tsn)) {
2858 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2861 * CMT: SFR algo (and HTNA) - set
2862 * saw_newack to 1 for dest being
2863 * newly acked. update
2864 * this_sack_highest_newack if
2867 if (tp1->rec.data.chunk_was_revoked == 0)
2868 tp1->whoTo->saw_newack = 1;
2870 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2871 tp1->whoTo->this_sack_highest_newack)) {
2872 tp1->whoTo->this_sack_highest_newack =
2873 tp1->rec.data.TSN_seq;
2876 * CMT DAC algo: also update
2877 * this_sack_lowest_newack
2879 if (*this_sack_lowest_newack == 0) {
2880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2881 sctp_log_sack(*this_sack_lowest_newack,
2883 tp1->rec.data.TSN_seq,
2886 SCTP_LOG_TSN_ACKED);
2888 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2891 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2892 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2893 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2894 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2895 * Separate pseudo_cumack trackers for first transmissions and
2898 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2899 if (tp1->rec.data.chunk_was_revoked == 0) {
2900 tp1->whoTo->new_pseudo_cumack = 1;
2902 tp1->whoTo->find_pseudo_cumack = 1;
2904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2905 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2907 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2908 if (tp1->rec.data.chunk_was_revoked == 0) {
2909 tp1->whoTo->new_pseudo_cumack = 1;
2911 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2914 sctp_log_sack(*biggest_newly_acked_tsn,
2916 tp1->rec.data.TSN_seq,
2919 SCTP_LOG_TSN_ACKED);
2921 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2922 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2923 tp1->whoTo->flight_size,
2925 (uintptr_t) tp1->whoTo,
2926 tp1->rec.data.TSN_seq);
2928 sctp_flight_size_decrease(tp1);
2929 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2930 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2933 sctp_total_flight_decrease(stcb, tp1);
2935 tp1->whoTo->net_ack += tp1->send_size;
2936 if (tp1->snd_count < 2) {
2938 * True non-retransmited chunk
2940 tp1->whoTo->net_ack2 += tp1->send_size;
/* Presumably guarded by rto_ok / snd_count == 1 tests on dropped lines. */
2948 sctp_calculate_rto(stcb,
2951 &tp1->sent_rcv_time,
2952 sctp_align_safe_nocopy,
2953 SCTP_RTT_FROM_DATA);
2956 if (tp1->whoTo->rto_needed == 0) {
2957 tp1->whoTo->rto_needed = 1;
2963 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2964 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2965 stcb->asoc.this_sack_highest_gap)) {
2966 stcb->asoc.this_sack_highest_gap =
2967 tp1->rec.data.TSN_seq;
2969 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2970 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2971 #ifdef SCTP_AUDITING_ENABLED
2972 sctp_audit_log(0xB2,
2973 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2978 * All chunks NOT UNSENT fall through here and are marked
2979 * (leave PR-SCTP ones that are to skip alone though)
2981 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2982 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2983 tp1->sent = SCTP_DATAGRAM_MARKED;
2985 if (tp1->rec.data.chunk_was_revoked) {
2986 /* deflate the cwnd */
2987 tp1->whoTo->cwnd -= tp1->book_size;
2988 tp1->rec.data.chunk_was_revoked = 0;
2990 /* NR Sack code here */
2992 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2993 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2994 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2997 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3000 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
/* Non-renegable ack: release the chunk's data now. */
3006 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3007 sctp_m_freem(tp1->data);
3014 } /* if (tp1->TSN_seq == theTSN) */
3015 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3018 tp1 = TAILQ_NEXT(tp1, sctp_next);
3019 if ((tp1 == NULL) && (circled == 0)) {
3021 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3023 } /* end while (tp1) */
3026 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3028 /* In case the fragments were not in order we must reset */
3029 } /* end for (j = fragStart */
3031 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() - iterate over the num_seg + num_nr_seg
 * gap-ack blocks of a (NR-)SACK chunk, pulling each block out of the
 * mbuf chain, skipping malformed blocks (start > end), restarting the
 * sent-queue scan when blocks arrive out of order, tracking the
 * biggest TSN acked, and delegating the per-range marking to
 * sctp_process_segment_range().  Returns chunk_freed (set when the
 * range processing freed a chunk).
 *
 * NOTE(review): extract has dropped lines (embedded line numbers skip;
 * opening brace, declarations of i/num_frs/chunk_freed/non_revocable,
 * the "continue" statements and several braces are missing).  Code
 * left byte-identical; comments only were added.
 */
3036 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3037 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3038 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3039 int num_seg, int num_nr_seg, int *rto_ok)
3041 struct sctp_gap_ack_block *frag, block;
3042 struct sctp_tmit_chunk *tp1;
3047 uint16_t frag_strt, frag_end, prev_frag_end;
3049 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3053 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3056 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Copy the next gap-ack block out of the mbuf chain. */
3058 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3059 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3060 *offset += sizeof(block);
3062 return (chunk_freed);
3064 frag_strt = ntohs(frag->start);
3065 frag_end = ntohs(frag->end);
3067 if (frag_strt > frag_end) {
3068 /* This gap report is malformed, skip it. */
3071 if (frag_strt <= prev_frag_end) {
3072 /* This gap report is not in order, so restart. */
3073 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3075 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3076 *biggest_tsn_acked = last_tsn + frag_end;
3083 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3084 non_revocable, &num_frs, biggest_newly_acked_tsn,
3085 this_sack_lowest_newack, rto_ok)) {
3088 prev_frag_end = frag_end;
3090 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3092 sctp_log_fr(*biggest_tsn_acked,
3093 *biggest_newly_acked_tsn,
3094 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3096 return (chunk_freed);
/*
 * sctp_check_for_revoked() - after processing a SACK, walk the sent
 * queue between cumack and biggest_tsn_acked.  A chunk still marked
 * ACKED that was not covered by this SACK has been revoked: put it
 * back to SENT, flag chunk_was_revoked, restore its flight size and
 * inflate cwnd by book_size to compensate.  A chunk marked MARKED was
 * re-acked by this SACK and is promoted to ACKED.
 *
 * NOTE(review): extract has dropped lines (embedded line numbers skip;
 * the opening brace, a "break" for TSNs beyond biggest_tsn_acked and
 * the closing braces are missing).  Code left byte-identical;
 * comments only were added.
 */
3100 sctp_check_for_revoked(struct sctp_tcb *stcb,
3101 struct sctp_association *asoc, uint32_t cumack,
3102 uint32_t biggest_tsn_acked)
3104 struct sctp_tmit_chunk *tp1;
3106 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3107 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3109 * ok this guy is either ACK or MARKED. If it is
3110 * ACKED it has been previously acked but not this
3111 * time i.e. revoked. If it is MARKED it was ACK'ed
3114 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3117 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3118 /* it has been revoked */
3119 tp1->sent = SCTP_DATAGRAM_SENT;
3120 tp1->rec.data.chunk_was_revoked = 1;
3122 * We must add this stuff back in to assure
3123 * timers and such get started.
3125 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3126 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3127 tp1->whoTo->flight_size,
3129 (uintptr_t) tp1->whoTo,
3130 tp1->rec.data.TSN_seq);
3132 sctp_flight_size_increase(tp1);
3133 sctp_total_flight_increase(stcb, tp1);
3135 * We inflate the cwnd to compensate for our
3136 * artificial inflation of the flight_size.
3138 tp1->whoTo->cwnd += tp1->book_size;
3139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3140 sctp_log_sack(asoc->last_acked_seq,
3142 tp1->rec.data.TSN_seq,
3145 SCTP_LOG_TSN_REVOKED);
3147 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3148 /* it has been re-acked in this SACK */
3149 tp1->sent = SCTP_DATAGRAM_ACKED;
3152 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3159 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3160 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3162 struct sctp_tmit_chunk *tp1;
3163 int strike_flag = 0;
3165 int tot_retrans = 0;
3166 uint32_t sending_seq;
3167 struct sctp_nets *net;
3168 int num_dests_sacked = 0;
3171 * select the sending_seq, this is either the next thing ready to be
3172 * sent but not transmitted, OR, the next seq we assign.
3174 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3176 sending_seq = asoc->sending_seq;
3178 sending_seq = tp1->rec.data.TSN_seq;
3181 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3182 if ((asoc->sctp_cmt_on_off > 0) &&
3183 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3184 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3185 if (net->saw_newack)
3189 if (stcb->asoc.peer_supports_prsctp) {
3190 (void)SCTP_GETTIME_TIMEVAL(&now);
3192 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3194 if (tp1->no_fr_allowed) {
3195 /* this one had a timeout or something */
3198 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3199 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3200 sctp_log_fr(biggest_tsn_newly_acked,
3201 tp1->rec.data.TSN_seq,
3203 SCTP_FR_LOG_CHECK_STRIKE);
3205 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3206 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3210 if (stcb->asoc.peer_supports_prsctp) {
3211 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3212 /* Is it expired? */
3213 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3214 /* Yes so drop it */
3215 if (tp1->data != NULL) {
3216 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3217 SCTP_SO_NOT_LOCKED);
3223 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3224 /* we are beyond the tsn in the sack */
3227 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3228 /* either a RESEND, ACKED, or MARKED */
3230 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3231 /* Continue strikin FWD-TSN chunks */
3232 tp1->rec.data.fwd_tsn_cnt++;
3237 * CMT : SFR algo (covers part of DAC and HTNA as well)
3239 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3241 * No new acks were receieved for data sent to this
3242 * dest. Therefore, according to the SFR algo for
3243 * CMT, no data sent to this dest can be marked for
3244 * FR using this SACK.
3247 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3248 tp1->whoTo->this_sack_highest_newack)) {
3250 * CMT: New acks were receieved for data sent to
3251 * this dest. But no new acks were seen for data
3252 * sent after tp1. Therefore, according to the SFR
3253 * algo for CMT, tp1 cannot be marked for FR using
3254 * this SACK. This step covers part of the DAC algo
3255 * and the HTNA algo as well.
3260 * Here we check to see if we were have already done a FR
3261 * and if so we see if the biggest TSN we saw in the sack is
3262 * smaller than the recovery point. If so we don't strike
3263 * the tsn... otherwise we CAN strike the TSN.
3266 * @@@ JRI: Check for CMT if (accum_moved &&
3267 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3270 if (accum_moved && asoc->fast_retran_loss_recovery) {
3272 * Strike the TSN if in fast-recovery and cum-ack
3275 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3276 sctp_log_fr(biggest_tsn_newly_acked,
3277 tp1->rec.data.TSN_seq,
3279 SCTP_FR_LOG_STRIKE_CHUNK);
3281 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3284 if ((asoc->sctp_cmt_on_off > 0) &&
3285 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3287 * CMT DAC algorithm: If SACK flag is set to
3288 * 0, then lowest_newack test will not pass
3289 * because it would have been set to the
3290 * cumack earlier. If not already to be
3291 * rtx'd, If not a mixed sack and if tp1 is
3292 * not between two sacked TSNs, then mark by
3293 * one more. NOTE that we are marking by one
3294 * additional time since the SACK DAC flag
3295 * indicates that two packets have been
3296 * received after this missing TSN.
3298 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3299 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3301 sctp_log_fr(16 + num_dests_sacked,
3302 tp1->rec.data.TSN_seq,
3304 SCTP_FR_LOG_STRIKE_CHUNK);
3309 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3310 (asoc->sctp_cmt_on_off == 0)) {
3312 * For those that have done a FR we must take
3313 * special consideration if we strike. I.e the
3314 * biggest_newly_acked must be higher than the
3315 * sending_seq at the time we did the FR.
3318 #ifdef SCTP_FR_TO_ALTERNATE
3320 * If FR's go to new networks, then we must only do
3321 * this for singly homed asoc's. However if the FR's
3322 * go to the same network (Armando's work) then its
3323 * ok to FR multiple times.
3331 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3332 tp1->rec.data.fast_retran_tsn)) {
3334 * Strike the TSN, since this ack is
3335 * beyond where things were when we
3338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3339 sctp_log_fr(biggest_tsn_newly_acked,
3340 tp1->rec.data.TSN_seq,
3342 SCTP_FR_LOG_STRIKE_CHUNK);
3344 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3348 if ((asoc->sctp_cmt_on_off > 0) &&
3349 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3351 * CMT DAC algorithm: If
3352 * SACK flag is set to 0,
3353 * then lowest_newack test
3354 * will not pass because it
3355 * would have been set to
3356 * the cumack earlier. If
3357 * not already to be rtx'd,
3358 * If not a mixed sack and
3359 * if tp1 is not between two
3360 * sacked TSNs, then mark by
3361 * one more. NOTE that we
3362 * are marking by one
3363 * additional time since the
3364 * SACK DAC flag indicates
3365 * that two packets have
3366 * been received after this
3369 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3370 (num_dests_sacked == 1) &&
3371 SCTP_TSN_GT(this_sack_lowest_newack,
3372 tp1->rec.data.TSN_seq)) {
3373 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3374 sctp_log_fr(32 + num_dests_sacked,
3375 tp1->rec.data.TSN_seq,
3377 SCTP_FR_LOG_STRIKE_CHUNK);
3379 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3387 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3390 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3391 biggest_tsn_newly_acked)) {
3393 * We don't strike these: This is the HTNA
3394 * algorithm i.e. we don't strike If our TSN is
3395 * larger than the Highest TSN Newly Acked.
3399 /* Strike the TSN */
3400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3401 sctp_log_fr(biggest_tsn_newly_acked,
3402 tp1->rec.data.TSN_seq,
3404 SCTP_FR_LOG_STRIKE_CHUNK);
3406 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3409 if ((asoc->sctp_cmt_on_off > 0) &&
3410 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3412 * CMT DAC algorithm: If SACK flag is set to
3413 * 0, then lowest_newack test will not pass
3414 * because it would have been set to the
3415 * cumack earlier. If not already to be
3416 * rtx'd, If not a mixed sack and if tp1 is
3417 * not between two sacked TSNs, then mark by
3418 * one more. NOTE that we are marking by one
3419 * additional time since the SACK DAC flag
3420 * indicates that two packets have been
3421 * received after this missing TSN.
3423 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3424 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3425 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3426 sctp_log_fr(48 + num_dests_sacked,
3427 tp1->rec.data.TSN_seq,
3429 SCTP_FR_LOG_STRIKE_CHUNK);
3435 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3436 struct sctp_nets *alt;
3438 /* fix counts and things */
3439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3440 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3441 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3443 (uintptr_t) tp1->whoTo,
3444 tp1->rec.data.TSN_seq);
3447 tp1->whoTo->net_ack++;
3448 sctp_flight_size_decrease(tp1);
3449 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3450 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3455 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3456 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3458 /* add back to the rwnd */
3459 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3461 /* remove from the total flight */
3462 sctp_total_flight_decrease(stcb, tp1);
3464 if ((stcb->asoc.peer_supports_prsctp) &&
3465 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3467 * Has it been retransmitted tv_sec times? -
3468 * we store the retran count there.
3470 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3471 /* Yes, so drop it */
3472 if (tp1->data != NULL) {
3473 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3474 SCTP_SO_NOT_LOCKED);
3476 /* Make sure to flag we had a FR */
3477 tp1->whoTo->net_ack++;
3482 * SCTP_PRINTF("OK, we are now ready to FR this
3485 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3486 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3490 /* This is a subsequent FR */
3491 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3493 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3494 if (asoc->sctp_cmt_on_off > 0) {
3496 * CMT: Using RTX_SSTHRESH policy for CMT.
3497 * If CMT is being used, then pick dest with
3498 * largest ssthresh for any retransmission.
3500 tp1->no_fr_allowed = 1;
3502 /* sa_ignore NO_NULL_CHK */
3503 if (asoc->sctp_cmt_pf > 0) {
3505 * JRS 5/18/07 - If CMT PF is on,
3506 * use the PF version of
3509 alt = sctp_find_alternate_net(stcb, alt, 2);
3512 * JRS 5/18/07 - If only CMT is on,
3513 * use the CMT version of
3516 /* sa_ignore NO_NULL_CHK */
3517 alt = sctp_find_alternate_net(stcb, alt, 1);
3523 * CUCv2: If a different dest is picked for
3524 * the retransmission, then new
3525 * (rtx-)pseudo_cumack needs to be tracked
3526 * for orig dest. Let CUCv2 track new (rtx-)
3527 * pseudo-cumack always.
3530 tp1->whoTo->find_pseudo_cumack = 1;
3531 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3533 } else {/* CMT is OFF */
3535 #ifdef SCTP_FR_TO_ALTERNATE
3536 /* Can we find an alternate? */
3537 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3540 * default behavior is to NOT retransmit
3541 * FR's to an alternate. Armando Caro's
3542 * paper details why.
3548 tp1->rec.data.doing_fast_retransmit = 1;
3550 /* mark the sending seq for possible subsequent FR's */
3552 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3553 * (uint32_t)tpi->rec.data.TSN_seq);
3555 if (TAILQ_EMPTY(&asoc->send_queue)) {
3557 * If the queue of send is empty then its
3558 * the next sequence number that will be
3559 * assigned so we subtract one from this to
3560 * get the one we last sent.
3562 tp1->rec.data.fast_retran_tsn = sending_seq;
3565 * If there are chunks on the send queue
3566 * (unsent data that has made it from the
3567 * stream queues but not out the door, we
3568 * take the first one (which will have the
3569 * lowest TSN) and subtract one to get the
3572 struct sctp_tmit_chunk *ttt;
3574 ttt = TAILQ_FIRST(&asoc->send_queue);
3575 tp1->rec.data.fast_retran_tsn =
3576 ttt->rec.data.TSN_seq;
3581 * this guy had a RTO calculation pending on
3584 if ((tp1->whoTo != NULL) &&
3585 (tp1->whoTo->rto_needed == 0)) {
3586 tp1->whoTo->rto_needed = 1;
3590 if (alt != tp1->whoTo) {
3591 /* yes, there is an alternate. */
3592 sctp_free_remote_addr(tp1->whoTo);
3593 /* sa_ignore FREED_MEMORY */
3595 atomic_add_int(&alt->ref_count, 1);
/*
 * sctp_try_advance_peer_ack_point() - PR-SCTP: walk the sent queue and
 * move asoc->advanced_peer_ack_point forward across chunks marked
 * SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED.  A reliable chunk or
 * a RESEND-marked chunk whose PR-SCTP TTL has not yet expired stops the
 * advance; a RESEND chunk whose TTL has expired is dropped here via
 * sctp_release_pr_sctp_chunk().  The caller (cum-ack processing) uses
 * the result to decide whether a FWD-TSN chunk must be sent.
 * NOTE(review): several original lines (early returns, closing braces,
 * the final return of a_adv) are not visible in this excerpt; the
 * contract above reflects only the visible logic — confirm against the
 * full source.
 */
3601 struct sctp_tmit_chunk *
3602 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3603 struct sctp_association *asoc)
/* a_adv presumably accumulates the last chunk at the ack point — verify. */
3605 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Peer does not support PR-SCTP: nothing can ever be skipped. */
3609 if (asoc->peer_supports_prsctp == 0) {
/* Safe iteration: the loop body may remove tp1 from the sent queue. */
3612 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3613 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3614 tp1->sent != SCTP_DATAGRAM_RESEND &&
3615 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3616 /* no chance to advance, out of here */
3619 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3620 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3621 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3622 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3623 asoc->advanced_peer_ack_point,
3624 tp1->rec.data.TSN_seq, 0, 0);
3627 if (!PR_SCTP_ENABLED(tp1->flags)) {
3629 * We can't fwd-tsn past any that are reliable aka
3630 * retransmitted until the asoc fails.
/* Snapshot current time once for the TTL comparison below. */
3635 (void)SCTP_GETTIME_TIMEVAL(&now);
3639 * now we got a chunk which is marked for another
3640 * retransmission to a PR-stream but has run out its chances
3641 * already maybe OR has been marked to skip now. Can we skip
3642 * it if its a resend?
3644 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3645 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3647 * Now is this one marked for resend and its time is
3650 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3651 /* Yes so drop it */
3653 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3654 1, SCTP_SO_NOT_LOCKED);
3658 * No, we are done when hit one for resend
3659 * whos time as not expired.
3665 * Ok now if this chunk is marked to drop it we can clean up
3666 * the chunk, advance our peer ack point and we can check
3669 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3670 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3671 /* advance PeerAckPoint goes forward */
3672 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3673 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3675 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3676 /* No update but we do save the chk */
3681 * If it is still in RESEND we can advance no
/*
 * sctp_fs_audit() - sanity audit of the association's flight-size
 * bookkeeping.  Classifies every chunk on the sent queue by its 'sent'
 * state (in flight / marked for resend / in between / acked / above)
 * and compares the tallies against asoc->total_flight /
 * total_flight_count recorded on entry.  With INVARIANTS it panics on
 * a mismatch; otherwise it prints diagnostics.
 * NOTE(review): the return-type line, the early-return for the
 * all-PR-SCTP case, and the final return of 'ret' are sampled out of
 * this excerpt — confirm against the full source.
 */
3691 sctp_fs_audit(struct sctp_association *asoc)
3693 struct sctp_tmit_chunk *chk;
/* Per-state counters filled in by the walk below. */
3694 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3695 int entry_flight, entry_cnt, ret;
3697 entry_flight = asoc->total_flight;
3698 entry_cnt = asoc->total_flight_count;
/* Everything on the queue is PR-SCTP skipped: audit cannot apply. */
3701 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3704 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3705 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3706 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3707 chk->rec.data.TSN_seq,
3711 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3713 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3715 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Caller expects no chunk to still be counted in flight here. */
3722 if ((inflight > 0) || (inbetween > 0)) {
3724 panic("Flight size-express incorrect? \n");
3726 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3727 entry_flight, entry_cnt);
3729 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3730 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery() - after the peer's rwnd reopens, take a
 * chunk that had been sent as a zero-window probe, remove it from the
 * flight accounting, and mark it SCTP_DATAGRAM_RESEND so it is resent
 * through the normal retransmission path.  Chunks already at or above
 * ACKED (or with freed data) are only logged and left alone — we never
 * move an acked TSN backwards.
 * NOTE(review): the return-type line and an early return after the
 * "skipped" log are sampled out of this excerpt.
 */
3739 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3740 struct sctp_association *asoc,
3741 struct sctp_tmit_chunk *tp1)
/* Clear the probe flag unconditionally; tp1 is no longer a probe. */
3743 tp1->window_probe = 0;
3744 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3745 /* TSN's skipped we do NOT move back. */
3746 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3747 tp1->whoTo->flight_size,
3749 (uintptr_t) tp1->whoTo,
3750 tp1->rec.data.TSN_seq);
3753 /* First setup this by shrinking flight */
/* Optional CC-module hook: notify that this TSN leaves flight. */
3754 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3755 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3758 sctp_flight_size_decrease(tp1);
3759 sctp_total_flight_decrease(stcb, tp1);
3760 /* Now mark for resend */
3761 tp1->sent = SCTP_DATAGRAM_RESEND;
3762 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3765 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3766 tp1->whoTo->flight_size,
3768 (uintptr_t) tp1->whoTo,
3769 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack() - fast-path handler for a SACK that
 * carries only a cumulative ack (no gap-ack blocks).  Under the TCB
 * lock it: validates the cum-ack against sending_seq (strict-sacks
 * aborts on an impossible ack), frees every sent-queue chunk at or
 * below cumack while updating per-net flight/net_ack/RTO/CUCv2 state,
 * wakes the sending socket, runs the CC module's after-sack hook,
 * recomputes the peer rwnd (with SWS avoidance), restarts or stops T3
 * timers per net, handles window-probe recovery, audits flight size,
 * drives SHUTDOWN/SHUTDOWN-ACK when the queues empty, and performs the
 * PR-SCTP advanced-peer-ack-point / FWD-TSN procedures.
 * NOTE(review): this excerpt is sampled — many original lines (returns,
 * else arms, closing braces, some declarations such as old_rwnd,
 * send_s, oper, so, to_ticks) are not visible; comments below describe
 * only the visible logic.
 */
3774 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3775 uint32_t rwnd, int *abort_now, int ecne_seen)
3777 struct sctp_nets *net;
3778 struct sctp_association *asoc;
3779 struct sctp_tmit_chunk *tp1, *tp2;
3781 int win_probe_recovery = 0;
3782 int win_probe_recovered = 0;
3783 int j, done_once = 0;
3786 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3787 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3788 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
/* Caller must hold the TCB lock for the whole of SACK processing. */
3790 SCTP_TCB_LOCK_ASSERT(stcb);
3791 #ifdef SCTP_ASOCLOG_OF_TSNS
3792 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3793 stcb->asoc.cumack_log_at++;
3794 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3795 stcb->asoc.cumack_log_at = 0;
3799 old_rwnd = asoc->peers_rwnd;
/* Old (already-seen) cum-ack: visible branch falls through to the
 * duplicate/window-update handling below. */
3800 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3803 } else if (asoc->last_acked_seq == cumack) {
3804 /* Window update sack */
3805 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3806 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3807 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3808 /* SWS sender side engages */
3809 asoc->peers_rwnd = 0;
3811 if (asoc->peers_rwnd > old_rwnd) {
3816 /* First setup for CC stuff */
3817 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3818 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3819 /* Drag along the window_tsn for cwr's */
3820 net->cwr_window_tsn = cumack;
3822 net->prev_cwnd = net->cwnd;
3827 * CMT: Reset CUC and Fast recovery algo variables before
3830 net->new_pseudo_cumack = 0;
3831 net->will_exit_fast_recovery = 0;
3832 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3833 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Strict-sack checking: abort the assoc if the peer acks a TSN we
 * have not yet sent (send_s = highest outstanding TSN + 1). */
3836 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3839 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3840 tp1 = TAILQ_LAST(&asoc->sent_queue,
3841 sctpchunk_listhead);
3842 send_s = tp1->rec.data.TSN_seq + 1;
3844 send_s = asoc->sending_seq;
3846 if (SCTP_TSN_GE(cumack, send_s)) {
3852 panic("Impossible sack 1");
/* Build a PROTOCOL_VIOLATION error cause and abort the assoc. */
3857 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3858 0, M_DONTWAIT, 1, MT_DATA);
3860 struct sctp_paramhdr *ph;
3863 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3865 ph = mtod(oper, struct sctp_paramhdr *);
3866 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3867 ph->param_length = htons(SCTP_BUF_LEN(oper));
3868 ippp = (uint32_t *) (ph + 1);
3869 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3871 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3872 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3877 asoc->this_sack_highest_gap = cumack;
3878 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3879 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3880 stcb->asoc.overall_error_count,
3882 SCTP_FROM_SCTP_INDATA,
/* A valid SACK clears the association error counter. */
3885 stcb->asoc.overall_error_count = 0;
3886 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3887 /* process the new consecutive TSN first */
3888 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3889 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3890 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3891 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3893 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3895 * If it is less than ACKED, it is
3896 * now no-longer in flight. Higher
3897 * values may occur during marking
3899 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3900 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3901 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3902 tp1->whoTo->flight_size,
3904 (uintptr_t) tp1->whoTo,
3905 tp1->rec.data.TSN_seq);
3907 sctp_flight_size_decrease(tp1);
3908 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3909 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3912 /* sa_ignore NO_NULL_CHK */
3913 sctp_total_flight_decrease(stcb, tp1);
3915 tp1->whoTo->net_ack += tp1->send_size;
3916 if (tp1->snd_count < 2) {
3918 * True non-retransmited
3921 tp1->whoTo->net_ack2 +=
3924 /* update RTO too? */
/* RTT sample only valid for never-retransmitted chunks (Karn). */
3933 sctp_calculate_rto(stcb,
3935 &tp1->sent_rcv_time,
3936 sctp_align_safe_nocopy,
3937 SCTP_RTT_FROM_DATA);
3940 if (tp1->whoTo->rto_needed == 0) {
3941 tp1->whoTo->rto_needed = 1;
3947 * CMT: CUCv2 algorithm. From the
3948 * cumack'd TSNs, for each TSN being
3949 * acked for the first time, set the
3950 * following variables for the
3951 * corresp destination.
3952 * new_pseudo_cumack will trigger a
3954 * find_(rtx_)pseudo_cumack will
3955 * trigger search for the next
3956 * expected (rtx-)pseudo-cumack.
3958 tp1->whoTo->new_pseudo_cumack = 1;
3959 tp1->whoTo->find_pseudo_cumack = 1;
3960 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3962 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3963 /* sa_ignore NO_NULL_CHK */
3964 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3967 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3968 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3970 if (tp1->rec.data.chunk_was_revoked) {
3971 /* deflate the cwnd */
3972 tp1->whoTo->cwnd -= tp1->book_size;
3973 tp1->rec.data.chunk_was_revoked = 0;
3975 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3976 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3977 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3980 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* Chunk fully acked: unlink and release it. */
3984 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3986 /* sa_ignore NO_NULL_CHK */
3987 sctp_free_bufspace(stcb, asoc, tp1, 1);
3988 sctp_m_freem(tp1->data);
3991 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3992 sctp_log_sack(asoc->last_acked_seq,
3994 tp1->rec.data.TSN_seq,
3997 SCTP_LOG_FREE_SENT);
3999 asoc->sent_queue_cnt--;
4000 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4007 /* sa_ignore NO_NULL_CHK */
/* Wake the sending socket now that send-buffer space was freed.
 * On Apple / lock-testing builds the socket lock must be taken
 * with the TCB lock dropped, then re-validated. */
4008 if (stcb->sctp_socket) {
4009 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4013 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4014 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4015 /* sa_ignore NO_NULL_CHK */
4016 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4018 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4019 so = SCTP_INP_SO(stcb->sctp_ep);
4020 atomic_add_int(&stcb->asoc.refcnt, 1);
4021 SCTP_TCB_UNLOCK(stcb);
4022 SCTP_SOCKET_LOCK(so, 1);
4023 SCTP_TCB_LOCK(stcb);
4024 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4025 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4026 /* assoc was freed while we were unlocked */
4027 SCTP_SOCKET_UNLOCK(so, 1);
4031 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4032 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4033 SCTP_SOCKET_UNLOCK(so, 1);
4036 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4037 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4041 /* JRS - Use the congestion control given in the CC module */
4042 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4043 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4044 if (net->net_ack2 > 0) {
4046 * Karn's rule applies to clearing error
4047 * count, this is optional.
4049 net->error_count = 0;
4050 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4051 /* addr came good */
4052 net->dest_state |= SCTP_ADDR_REACHABLE;
4053 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4054 0, (void *)net, SCTP_SO_NOT_LOCKED);
4056 if (net == stcb->asoc.primary_destination) {
4057 if (stcb->asoc.alternate) {
4059 * release the alternate,
4062 sctp_free_remote_addr(stcb->asoc.alternate);
4063 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: resume heartbeats and
 * let the CC module restore the cwnd. */
4066 if (net->dest_state & SCTP_ADDR_PF) {
4067 net->dest_state &= ~SCTP_ADDR_PF;
4068 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4069 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4070 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4071 /* Done with this net */
4074 /* restore any doubled timers */
4075 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4076 if (net->RTO < stcb->asoc.minrto) {
4077 net->RTO = stcb->asoc.minrto;
4079 if (net->RTO > stcb->asoc.maxrto) {
4080 net->RTO = stcb->asoc.maxrto;
4084 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4086 asoc->last_acked_seq = cumack;
4088 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4089 /* nothing left in-flight */
4090 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4091 net->flight_size = 0;
4092 net->partial_bytes_acked = 0;
4094 asoc->total_flight = 0;
4095 asoc->total_flight_count = 0;
/* Recompute peer rwnd from the advertised window minus bytes still
 * in flight (plus per-chunk overhead), with SWS avoidance. */
4098 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4099 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4100 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4101 /* SWS sender side engages */
4102 asoc->peers_rwnd = 0;
4104 if (asoc->peers_rwnd > old_rwnd) {
4105 win_probe_recovery = 1;
4107 /* Now assure a timer where data is queued at */
4110 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4113 if (win_probe_recovery && (net->window_probe)) {
4114 win_probe_recovered = 1;
4116 * Find first chunk that was used with window probe
4117 * and clear the sent
4119 /* sa_ignore FREED_MEMORY */
4120 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4121 if (tp1->window_probe) {
4122 /* move back to data send queue */
4123 sctp_window_probe_recovery(stcb, asoc, tp1);
4128 if (net->RTO == 0) {
4129 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4131 to_ticks = MSEC_TO_TICKS(net->RTO);
4133 if (net->flight_size) {
4135 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4136 sctp_timeout_handler, &net->rxt_timer);
4137 if (net->window_probe) {
4138 net->window_probe = 0;
4141 if (net->window_probe) {
4143 * In window probes we must assure a timer
4144 * is still running there
4146 net->window_probe = 0;
4147 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4148 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4149 sctp_timeout_handler, &net->rxt_timer);
4151 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4152 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4154 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
/* Flight accounting says bytes remain but nothing can be resent:
 * audit and, if the audit flags a mismatch, rebuild flight state
 * from the sent queue.  (The surrounding condition and done_once
 * retry loop are partly sampled out of this excerpt.) */
4159 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4160 (asoc->sent_queue_retran_cnt == 0) &&
4161 (win_probe_recovered == 0) &&
4164 * huh, this should not happen unless all packets are
4165 * PR-SCTP and marked to skip of course.
4167 if (sctp_fs_audit(asoc)) {
4168 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4169 net->flight_size = 0;
4171 asoc->total_flight = 0;
4172 asoc->total_flight_count = 0;
4173 asoc->sent_queue_retran_cnt = 0;
4174 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4175 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4176 sctp_flight_size_increase(tp1);
4177 sctp_total_flight_increase(stcb, tp1);
4178 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4179 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4186 /**********************************/
4187 /* Now what about shutdown issues */
4188 /**********************************/
4189 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4190 /* nothing left on sendqueue.. consider done */
4192 if ((asoc->stream_queue_cnt == 1) &&
4193 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4194 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4195 (asoc->locked_on_sending)
4197 struct sctp_stream_queue_pending *sp;
4200 * I may be in a state where we got all across.. but
4201 * cannot write more due to a shutdown... we abort
4202 * since the user did not indicate EOR in this case.
4203 * The sp will be cleaned during free of the asoc.
4205 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4207 if ((sp) && (sp->length == 0)) {
4208 /* Let cleanup code purge it */
4209 if (sp->msg_is_complete) {
4210 asoc->stream_queue_cnt--;
4212 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4213 asoc->locked_on_sending = NULL;
4214 asoc->stream_queue_cnt--;
4218 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4219 (asoc->stream_queue_cnt == 0)) {
4220 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4221 /* Need to abort here */
/* User-initiated abort: partial message left at shutdown. */
4227 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4228 0, M_DONTWAIT, 1, MT_DATA);
4230 struct sctp_paramhdr *ph;
4232 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4233 ph = mtod(oper, struct sctp_paramhdr *);
4234 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4235 ph->param_length = htons(SCTP_BUF_LEN(oper));
4237 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4238 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4240 struct sctp_nets *netp;
4242 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4243 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4244 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* All data delivered: move to SHUTDOWN-SENT and send SHUTDOWN. */
4246 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4247 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4248 sctp_stop_timers_for_shutdown(stcb);
4249 if (asoc->alternate) {
4250 netp = asoc->alternate;
4252 netp = asoc->primary_destination;
4254 sctp_send_shutdown(stcb, netp);
4255 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4256 stcb->sctp_ep, stcb, netp);
4257 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4258 stcb->sctp_ep, stcb, netp);
4260 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4261 (asoc->stream_queue_cnt == 0)) {
4262 struct sctp_nets *netp;
4264 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4267 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* Peer already shut down: answer with SHUTDOWN-ACK. */
4268 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4269 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4270 sctp_stop_timers_for_shutdown(stcb);
4271 if (asoc->alternate) {
4272 netp = asoc->alternate;
4274 netp = asoc->primary_destination;
4276 sctp_send_shutdown_ack(stcb, netp);
4277 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4278 stcb->sctp_ep, stcb, netp);
4281 /*********************************************/
4282 /* Here we perform PR-SCTP procedures */
4284 /*********************************************/
4285 /* C1. update advancedPeerAckPoint */
4286 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4287 asoc->advanced_peer_ack_point = cumack;
4289 /* PR-Sctp issues need to be addressed too */
4290 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4291 struct sctp_tmit_chunk *lchk;
4292 uint32_t old_adv_peer_ack_point;
4294 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4295 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4296 /* C3. See if we need to send a Fwd-TSN */
4297 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4299 * ISSUE with ECN, see FWD-TSN processing.
4301 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4302 send_forward_tsn(stcb, asoc);
4304 /* try to FR fwd-tsn's that get lost too */
4305 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4306 send_forward_tsn(stcb, asoc);
4311 /* Assure a timer is up */
4312 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4313 stcb->sctp_ep, stcb, lchk->whoTo);
4316 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4317 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4319 stcb->asoc.peers_rwnd,
4320 stcb->asoc.total_flight,
4321 stcb->asoc.total_output_queue_size);
4326 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4327 struct sctp_tcb *stcb,
4328 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4329 int *abort_now, uint8_t flags,
4330 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4332 struct sctp_association *asoc;
4333 struct sctp_tmit_chunk *tp1, *tp2;
4334 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4335 uint16_t wake_him = 0;
4336 uint32_t send_s = 0;
4338 int accum_moved = 0;
4339 int will_exit_fast_recovery = 0;
4340 uint32_t a_rwnd, old_rwnd;
4341 int win_probe_recovery = 0;
4342 int win_probe_recovered = 0;
4343 struct sctp_nets *net = NULL;
4346 uint8_t reneged_all = 0;
4347 uint8_t cmt_dac_flag;
4350 * we take any chance we can to service our queues since we cannot
4351 * get awoken when the socket is read from :<
4354 * Now perform the actual SACK handling: 1) Verify that it is not an
4355 * old sack, if so discard. 2) If there is nothing left in the send
4356 * queue (cum-ack is equal to last acked) then you have a duplicate
4357 * too, update any rwnd change and verify no timers are running.
4358 * then return. 3) Process any new consequtive data i.e. cum-ack
4359 * moved process these first and note that it moved. 4) Process any
4360 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4361 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4362 * sync up flightsizes and things, stop all timers and also check
4363 * for shutdown_pending state. If so then go ahead and send off the
4364 * shutdown. If in shutdown recv, send off the shutdown-ack and
4365 * start that timer, Ret. 9) Strike any non-acked things and do FR
4366 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4367 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4368 * if in shutdown_recv state.
4370 SCTP_TCB_LOCK_ASSERT(stcb);
4372 this_sack_lowest_newack = 0;
4373 SCTP_STAT_INCR(sctps_slowpath_sack);
4375 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4376 #ifdef SCTP_ASOCLOG_OF_TSNS
4377 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4378 stcb->asoc.cumack_log_at++;
4379 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4380 stcb->asoc.cumack_log_at = 0;
4385 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4386 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4387 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4389 old_rwnd = stcb->asoc.peers_rwnd;
4390 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4391 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4392 stcb->asoc.overall_error_count,
4394 SCTP_FROM_SCTP_INDATA,
4397 stcb->asoc.overall_error_count = 0;
4399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4400 sctp_log_sack(asoc->last_acked_seq,
4407 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4409 uint32_t *dupdata, dblock;
4411 for (i = 0; i < num_dup; i++) {
4412 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4413 sizeof(uint32_t), (uint8_t *) & dblock);
4414 if (dupdata == NULL) {
4417 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4420 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4422 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4423 tp1 = TAILQ_LAST(&asoc->sent_queue,
4424 sctpchunk_listhead);
4425 send_s = tp1->rec.data.TSN_seq + 1;
4428 send_s = asoc->sending_seq;
4430 if (SCTP_TSN_GE(cum_ack, send_s)) {
4434 * no way, we have not even sent this TSN out yet.
4435 * Peer is hopelessly messed up with us.
4437 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4440 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4441 tp1->rec.data.TSN_seq, (void *)tp1);
4446 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4447 0, M_DONTWAIT, 1, MT_DATA);
4449 struct sctp_paramhdr *ph;
4452 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4454 ph = mtod(oper, struct sctp_paramhdr *);
4455 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4456 ph->param_length = htons(SCTP_BUF_LEN(oper));
4457 ippp = (uint32_t *) (ph + 1);
4458 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4460 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4461 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4465 /**********************/
4466 /* 1) check the range */
4467 /**********************/
4468 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4469 /* acking something behind */
4472 /* update the Rwnd of the peer */
4473 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4474 TAILQ_EMPTY(&asoc->send_queue) &&
4475 (asoc->stream_queue_cnt == 0)) {
4476 /* nothing left on send/sent and strmq */
4477 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4478 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4479 asoc->peers_rwnd, 0, 0, a_rwnd);
4481 asoc->peers_rwnd = a_rwnd;
4482 if (asoc->sent_queue_retran_cnt) {
4483 asoc->sent_queue_retran_cnt = 0;
4485 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4486 /* SWS sender side engages */
4487 asoc->peers_rwnd = 0;
4489 /* stop any timers */
4490 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4491 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4492 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4493 net->partial_bytes_acked = 0;
4494 net->flight_size = 0;
4496 asoc->total_flight = 0;
4497 asoc->total_flight_count = 0;
4501 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4502 * things. The total byte count acked is tracked in netAckSz AND
4503 * netAck2 is used to track the total bytes acked that are un-
4504 * amibguious and were never retransmitted. We track these on a per
4505 * destination address basis.
4507 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4508 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4509 /* Drag along the window_tsn for cwr's */
4510 net->cwr_window_tsn = cum_ack;
4512 net->prev_cwnd = net->cwnd;
4517 * CMT: Reset CUC and Fast recovery algo variables before
4520 net->new_pseudo_cumack = 0;
4521 net->will_exit_fast_recovery = 0;
4522 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4523 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4526 /* process the new consecutive TSN first */
4527 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4528 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4529 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4531 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4533 * If it is less than ACKED, it is
4534 * now no-longer in flight. Higher
4535 * values may occur during marking
4537 if ((tp1->whoTo->dest_state &
4538 SCTP_ADDR_UNCONFIRMED) &&
4539 (tp1->snd_count < 2)) {
4541 * If there was no retran
4542 * and the address is
4543 * un-confirmed and we sent
4545 * sacked.. its confirmed,
4548 tp1->whoTo->dest_state &=
4549 ~SCTP_ADDR_UNCONFIRMED;
4551 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4553 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4554 tp1->whoTo->flight_size,
4556 (uintptr_t) tp1->whoTo,
4557 tp1->rec.data.TSN_seq);
4559 sctp_flight_size_decrease(tp1);
4560 sctp_total_flight_decrease(stcb, tp1);
4561 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4562 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4566 tp1->whoTo->net_ack += tp1->send_size;
4568 /* CMT SFR and DAC algos */
4569 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4570 tp1->whoTo->saw_newack = 1;
4572 if (tp1->snd_count < 2) {
4574 * True non-retransmited
4577 tp1->whoTo->net_ack2 +=
4580 /* update RTO too? */
4584 sctp_calculate_rto(stcb,
4586 &tp1->sent_rcv_time,
4587 sctp_align_safe_nocopy,
4588 SCTP_RTT_FROM_DATA);
4591 if (tp1->whoTo->rto_needed == 0) {
4592 tp1->whoTo->rto_needed = 1;
4598 * CMT: CUCv2 algorithm. From the
4599 * cumack'd TSNs, for each TSN being
4600 * acked for the first time, set the
4601 * following variables for the
4602 * corresp destination.
4603 * new_pseudo_cumack will trigger a
4605 * find_(rtx_)pseudo_cumack will
4606 * trigger search for the next
4607 * expected (rtx-)pseudo-cumack.
4609 tp1->whoTo->new_pseudo_cumack = 1;
4610 tp1->whoTo->find_pseudo_cumack = 1;
4611 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4614 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4615 sctp_log_sack(asoc->last_acked_seq,
4617 tp1->rec.data.TSN_seq,
4620 SCTP_LOG_TSN_ACKED);
4622 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4623 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4626 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4627 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4628 #ifdef SCTP_AUDITING_ENABLED
4629 sctp_audit_log(0xB3,
4630 (asoc->sent_queue_retran_cnt & 0x000000ff));
4633 if (tp1->rec.data.chunk_was_revoked) {
4634 /* deflate the cwnd */
4635 tp1->whoTo->cwnd -= tp1->book_size;
4636 tp1->rec.data.chunk_was_revoked = 0;
4638 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4639 tp1->sent = SCTP_DATAGRAM_ACKED;
4646 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4647 /* always set this up to cum-ack */
4648 asoc->this_sack_highest_gap = last_tsn;
4650 if ((num_seg > 0) || (num_nr_seg > 0)) {
4653 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4654 * to be greater than the cumack. Also reset saw_newack to 0
4657 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4658 net->saw_newack = 0;
4659 net->this_sack_highest_newack = last_tsn;
4663 * thisSackHighestGap will increase while handling NEW
4664 * segments this_sack_highest_newack will increase while
4665 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4666 * used for CMT DAC algo. saw_newack will also change.
4668 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4669 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4670 num_seg, num_nr_seg, &rto_ok)) {
4673 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4675 * validate the biggest_tsn_acked in the gap acks if
4676 * strict adherence is wanted.
4678 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4680 * peer is either confused or we are under
4681 * attack. We must abort.
4683 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4684 biggest_tsn_acked, send_s);
4689 /*******************************************/
4690 /* cancel ALL T3-send timer if accum moved */
4691 /*******************************************/
4692 if (asoc->sctp_cmt_on_off > 0) {
4693 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4694 if (net->new_pseudo_cumack)
4695 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4697 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4702 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4703 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4704 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4708 /********************************************/
4709 /* drop the acked chunks from the sentqueue */
4710 /********************************************/
4711 asoc->last_acked_seq = cum_ack;
4713 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4714 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4717 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4718 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4719 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4722 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4726 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4727 if (tp1->pr_sctp_on) {
4728 if (asoc->pr_sctp_cnt != 0)
4729 asoc->pr_sctp_cnt--;
4731 asoc->sent_queue_cnt--;
4733 /* sa_ignore NO_NULL_CHK */
4734 sctp_free_bufspace(stcb, asoc, tp1, 1);
4735 sctp_m_freem(tp1->data);
4737 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4738 asoc->sent_queue_cnt_removeable--;
4741 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4742 sctp_log_sack(asoc->last_acked_seq,
4744 tp1->rec.data.TSN_seq,
4747 SCTP_LOG_FREE_SENT);
4749 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4752 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4754 panic("Warning flight size is postive and should be 0");
4756 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4757 asoc->total_flight);
4759 asoc->total_flight = 0;
4761 /* sa_ignore NO_NULL_CHK */
4762 if ((wake_him) && (stcb->sctp_socket)) {
4763 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4767 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4769 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4771 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4772 so = SCTP_INP_SO(stcb->sctp_ep);
4773 atomic_add_int(&stcb->asoc.refcnt, 1);
4774 SCTP_TCB_UNLOCK(stcb);
4775 SCTP_SOCKET_LOCK(so, 1);
4776 SCTP_TCB_LOCK(stcb);
4777 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4778 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4779 /* assoc was freed while we were unlocked */
4780 SCTP_SOCKET_UNLOCK(so, 1);
4784 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4785 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4786 SCTP_SOCKET_UNLOCK(so, 1);
4789 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4790 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4794 if (asoc->fast_retran_loss_recovery && accum_moved) {
4795 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4796 /* Setup so we will exit RFC2582 fast recovery */
4797 will_exit_fast_recovery = 1;
4801 * Check for revoked fragments:
4803 * if Previous sack - Had no frags then we can't have any revoked if
4804 * Previous sack - Had frag's then - If we now have frags aka
4805 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4806 * some of them. else - The peer revoked all ACKED fragments, since
4807 * we had some before and now we have NONE.
4811 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4812 asoc->saw_sack_with_frags = 1;
4813 } else if (asoc->saw_sack_with_frags) {
4814 int cnt_revoked = 0;
4816 /* Peer revoked all dg's marked or acked */
4817 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4818 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4819 tp1->sent = SCTP_DATAGRAM_SENT;
4820 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4821 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4822 tp1->whoTo->flight_size,
4824 (uintptr_t) tp1->whoTo,
4825 tp1->rec.data.TSN_seq);
4827 sctp_flight_size_increase(tp1);
4828 sctp_total_flight_increase(stcb, tp1);
4829 tp1->rec.data.chunk_was_revoked = 1;
4831 * To ensure that this increase in
4832 * flightsize, which is artificial, does not
4833 * throttle the sender, we also increase the
4834 * cwnd artificially.
4836 tp1->whoTo->cwnd += tp1->book_size;
4843 asoc->saw_sack_with_frags = 0;
4846 asoc->saw_sack_with_nr_frags = 1;
4848 asoc->saw_sack_with_nr_frags = 0;
4850 /* JRS - Use the congestion control given in the CC module */
4851 if (ecne_seen == 0) {
4852 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4853 if (net->net_ack2 > 0) {
4855 * Karn's rule applies to clearing error
4856 * count, this is optional.
4858 net->error_count = 0;
4859 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4860 /* addr came good */
4861 net->dest_state |= SCTP_ADDR_REACHABLE;
4862 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4863 0, (void *)net, SCTP_SO_NOT_LOCKED);
4865 if (net == stcb->asoc.primary_destination) {
4866 if (stcb->asoc.alternate) {
4868 * release the alternate,
4871 sctp_free_remote_addr(stcb->asoc.alternate);
4872 stcb->asoc.alternate = NULL;
4875 if (net->dest_state & SCTP_ADDR_PF) {
4876 net->dest_state &= ~SCTP_ADDR_PF;
4877 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4878 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4879 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4880 /* Done with this net */
4883 /* restore any doubled timers */
4884 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4885 if (net->RTO < stcb->asoc.minrto) {
4886 net->RTO = stcb->asoc.minrto;
4888 if (net->RTO > stcb->asoc.maxrto) {
4889 net->RTO = stcb->asoc.maxrto;
4893 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4895 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4896 /* nothing left in-flight */
4897 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4898 /* stop all timers */
4899 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4900 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4901 net->flight_size = 0;
4902 net->partial_bytes_acked = 0;
4904 asoc->total_flight = 0;
4905 asoc->total_flight_count = 0;
4907 /**********************************/
4908 /* Now what about shutdown issues */
4909 /**********************************/
4910 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4911 /* nothing left on sendqueue.. consider done */
4912 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4913 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4914 asoc->peers_rwnd, 0, 0, a_rwnd);
4916 asoc->peers_rwnd = a_rwnd;
4917 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4918 /* SWS sender side engages */
4919 asoc->peers_rwnd = 0;
4922 if ((asoc->stream_queue_cnt == 1) &&
4923 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4924 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4925 (asoc->locked_on_sending)
4927 struct sctp_stream_queue_pending *sp;
4930 * I may be in a state where we got all across.. but
4931 * cannot write more due to a shutdown... we abort
4932 * since the user did not indicate EOR in this case.
4934 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4936 if ((sp) && (sp->length == 0)) {
4937 asoc->locked_on_sending = NULL;
4938 if (sp->msg_is_complete) {
4939 asoc->stream_queue_cnt--;
4941 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4942 asoc->stream_queue_cnt--;
4946 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4947 (asoc->stream_queue_cnt == 0)) {
4948 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4949 /* Need to abort here */
4955 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4956 0, M_DONTWAIT, 1, MT_DATA);
4958 struct sctp_paramhdr *ph;
4960 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4961 ph = mtod(oper, struct sctp_paramhdr *);
4962 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4963 ph->param_length = htons(SCTP_BUF_LEN(oper));
4965 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4966 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4969 struct sctp_nets *netp;
4971 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4972 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4973 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4975 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4976 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4977 sctp_stop_timers_for_shutdown(stcb);
4978 if (asoc->alternate) {
4979 netp = asoc->alternate;
4981 netp = asoc->primary_destination;
4983 sctp_send_shutdown(stcb, netp);
4984 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4985 stcb->sctp_ep, stcb, netp);
4986 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4987 stcb->sctp_ep, stcb, netp);
4990 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4991 (asoc->stream_queue_cnt == 0)) {
4992 struct sctp_nets *netp;
4994 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4997 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4998 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4999 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5000 sctp_stop_timers_for_shutdown(stcb);
5001 if (asoc->alternate) {
5002 netp = asoc->alternate;
5004 netp = asoc->primary_destination;
5006 sctp_send_shutdown_ack(stcb, netp);
5007 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5008 stcb->sctp_ep, stcb, netp);
5013 * Now here we are going to recycle net_ack for a different use...
5016 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5021 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5022 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5023 * automatically ensure that.
5025 if ((asoc->sctp_cmt_on_off > 0) &&
5026 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5027 (cmt_dac_flag == 0)) {
5028 this_sack_lowest_newack = cum_ack;
5030 if ((num_seg > 0) || (num_nr_seg > 0)) {
5031 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5032 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5034 /* JRS - Use the congestion control given in the CC module */
5035 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5037 /* Now are we exiting loss recovery ? */
5038 if (will_exit_fast_recovery) {
5039 /* Ok, we must exit fast recovery */
5040 asoc->fast_retran_loss_recovery = 0;
5042 if ((asoc->sat_t3_loss_recovery) &&
5043 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5044 /* end satellite t3 loss recovery */
5045 asoc->sat_t3_loss_recovery = 0;
5050 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5051 if (net->will_exit_fast_recovery) {
5052 /* Ok, we must exit fast recovery */
5053 net->fast_retran_loss_recovery = 0;
5057 /* Adjust and set the new rwnd value */
5058 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5059 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5060 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5062 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5063 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5064 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5065 /* SWS sender side engages */
5066 asoc->peers_rwnd = 0;
5068 if (asoc->peers_rwnd > old_rwnd) {
5069 win_probe_recovery = 1;
5072 * Now we must setup so we have a timer up for anyone with
5078 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5079 if (win_probe_recovery && (net->window_probe)) {
5080 win_probe_recovered = 1;
5082 * Find first chunk that was used with
5083 * window probe and clear the event. Put
5084 * it back into the send queue as if has
5087 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5088 if (tp1->window_probe) {
5089 sctp_window_probe_recovery(stcb, asoc, tp1);
5094 if (net->flight_size) {
5096 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5097 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5098 stcb->sctp_ep, stcb, net);
5100 if (net->window_probe) {
5101 net->window_probe = 0;
5104 if (net->window_probe) {
5106 * In window probes we must assure a timer
5107 * is still running there
5109 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5110 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5111 stcb->sctp_ep, stcb, net);
5114 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5115 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5117 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5122 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5123 (asoc->sent_queue_retran_cnt == 0) &&
5124 (win_probe_recovered == 0) &&
5127 * huh, this should not happen unless all packets are
5128 * PR-SCTP and marked to skip of course.
5130 if (sctp_fs_audit(asoc)) {
5131 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5132 net->flight_size = 0;
5134 asoc->total_flight = 0;
5135 asoc->total_flight_count = 0;
5136 asoc->sent_queue_retran_cnt = 0;
5137 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5138 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5139 sctp_flight_size_increase(tp1);
5140 sctp_total_flight_increase(stcb, tp1);
5141 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5142 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5149 /*********************************************/
5150 /* Here we perform PR-SCTP procedures */
5152 /*********************************************/
5153 /* C1. update advancedPeerAckPoint */
5154 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5155 asoc->advanced_peer_ack_point = cum_ack;
5157 /* C2. try to further move advancedPeerAckPoint ahead */
5158 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5159 struct sctp_tmit_chunk *lchk;
5160 uint32_t old_adv_peer_ack_point;
5162 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5163 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5164 /* C3. See if we need to send a Fwd-TSN */
5165 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5167 * ISSUE with ECN, see FWD-TSN processing.
5169 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5170 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5171 0xee, cum_ack, asoc->advanced_peer_ack_point,
5172 old_adv_peer_ack_point);
5174 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5175 send_forward_tsn(stcb, asoc);
5177 /* try to FR fwd-tsn's that get lost too */
5178 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5179 send_forward_tsn(stcb, asoc);
5184 /* Assure a timer is up */
5185 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5186 stcb->sctp_ep, stcb, lchk->whoTo);
5189 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5190 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5192 stcb->asoc.peers_rwnd,
5193 stcb->asoc.total_flight,
5194 stcb->asoc.total_output_queue_size);
/*
 * sctp_update_acked() - process the Cumulative-TSN-Ack carried in a
 * received SHUTDOWN chunk by funneling it through the express SACK path.
 *
 * @stcb:       association control block whose sent data is being acked.
 * @cp:         the SHUTDOWN chunk; cumulative_tsn_ack is in network order.
 * @abort_flag: out-parameter; set by the express SACK handler when the
 *              association must be torn down.
 *
 * A SHUTDOWN chunk carries no a_rwnd field, so one is synthesized as
 * peers_rwnd + total_flight: once the newly acked data is removed from
 * flight, recomputing the window from this value lands back on the same
 * peers_rwnd, i.e. the peer's advertised window is effectively unchanged.
 */
5199 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5202 uint32_t cum_ack, a_rwnd;
5204 cum_ack = ntohl(cp->cumulative_tsn_ack);
5205 /* Arrange so a_rwnd does NOT change */
5206 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5208 /* Now call the express sack handling */
/*
 * NOTE(review): the trailing 0 presumably means "no ECNE seen with this
 * ack" (cf. the ecne_seen checks in the full SACK handler) — confirm
 * against the sctp_express_handle_sack() prototype.
 */
5209 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * sctp_kick_prsctp_reorder_queue() - after a FORWARD-TSN has advanced
 * strmin->last_sequence_delivered, push out everything on this in-stream's
 * ordered reorder queue that is now deliverable.
 *
 * @stcb:   association control block.
 * @strmin: the inbound stream whose inqueue is to be drained.
 *
 * Two passes over strmin->inqueue:
 *   1) deliver every queued message with SSN <= last_sequence_delivered
 *      (messages the peer told us to skip past);
 *   2) then resume normal strictly-in-order delivery, advancing
 *      last_sequence_delivered one SSN at a time while the next expected
 *      message is present.
 *
 * NOTE(review): 'asoc' is used below but its assignment (presumably
 * asoc = &stcb->asoc) is on an elided line — confirm in the full file.
 * Caller apparently holds the INP read lock (SCTP_READ_LOCK_HELD is
 * passed to sctp_add_to_readq()).
 */
5213 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5214 struct sctp_stream_in *strmin)
5216 struct sctp_queued_to_read *ctl, *nctl;
5217 struct sctp_association *asoc;
/* Baseline SSN: everything at or below this was skipped/delivered. */
5221 tt = strmin->last_sequence_delivered;
5223 * First deliver anything prior to and including the stream no that
/* Pass 1: flush all messages with SSN <= tt. */
5226 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5227 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5228 /* this is deliverable now */
5229 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5230 /* subtract pending on streams */
/* Keep the association-wide queued-bytes/chunk accounting in sync. */
5231 asoc->size_on_all_streams -= ctl->length;
5232 sctp_ucount_decr(asoc->cnt_on_all_streams);
5233 /* deliver it to at least the delivery-q */
5234 if (stcb->sctp_socket) {
/* Skipped-over TSNs must not later be revoked by the peer. */
5235 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5236 sctp_add_to_readq(stcb->sctp_ep, stcb,
5238 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Queue is SSN-ordered: first higher SSN ends pass 1 (break elided). */
5241 /* no more delivery now. */
5246 * now we must deliver things in queue the normal way if any are
/* Pass 2: resume in-order delivery from the next expected SSN. */
5249 tt = strmin->last_sequence_delivered + 1;
5250 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5251 if (tt == ctl->sinfo_ssn) {
5252 /* this is deliverable now */
5253 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5254 /* subtract pending on streams */
5255 asoc->size_on_all_streams -= ctl->length;
5256 sctp_ucount_decr(asoc->cnt_on_all_streams);
5257 /* deliver it to at least the delivery-q */
/* Record progress before handing the message up. */
5258 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5259 if (stcb->sctp_socket) {
5260 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5261 sctp_add_to_readq(stcb->sctp_ep, stcb,
5263 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance to the next expected SSN and keep draining. */
5266 tt = strmin->last_sequence_delivered + 1;
/*
 * sctp_flush_reassm_for_str_seq() - purge from the reassembly queue all
 * fragments belonging to one ordered message (identified by stream number
 * and stream sequence number) that a FORWARD-TSN tells us to skip.
 *
 * @stcb:   association control block.
 * @asoc:   the association (asoc == &stcb->asoc in this code base).
 * @stream: stream id of the message being abandoned.
 * @seq:    stream sequence number (SSN) of the message being abandoned.
 *
 * Unordered fragments are never touched here: their SSN field has no
 * meaning, so they cannot be matched by (stream, seq).
 */
5274 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5275 struct sctp_association *asoc,
5276 uint16_t stream, uint16_t seq)
5278 struct sctp_tmit_chunk *chk, *nchk;
5280 /* For each one on here see if we need to toss it */
5282 * For now large messages held on the reasmqueue that are complete
5283 * will be tossed too. We could in theory do more work to spin
5284 * through and stop after dumping one msg aka seeing the start of a
5285 * new msg at the head, and call the delivery function... to see if
5286 * it can be delivered... But for now we just dump everything on the
5289 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5291 * Do not toss it if on a different stream or marked for
5292 * unordered delivery in which case the stream sequence
5293 * number has no meaning.
5295 if ((chk->rec.data.stream_number != stream) ||
5296 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5299 if (chk->rec.data.stream_seq == seq) {
5300 /* It needs to be tossed */
5301 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/*
 * Track the highest tossed TSN as "delivered" and remember its
 * stream/SSN/flags for any partial-delivery-API bookkeeping.
 */
5302 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5303 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5304 asoc->str_of_pdapi = chk->rec.data.stream_number;
5305 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5306 asoc->fragment_flags = chk->rec.data.rcv_flags;
/* Keep reassembly-queue byte/chunk accounting in sync. */
5308 asoc->size_on_reasm_queue -= chk->send_size;
5309 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5311 /* Clear up any stream problem */
5312 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5313 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5315 * We must dump forward this streams
5316 * sequence number if the chunk is not
5317 * unordered that is being skipped. There is
5318 * a chance that if the peer does not
5319 * include the last fragment in its FWD-TSN
5320 * we WILL have a problem here since you
5321 * would have a partial chunk in queue that
5322 * may not be deliverable. Also if a Partial
5323 * delivery API as started the user may get
5324 * a partial chunk. The next read returning
5325 * a new chunk... really ugly but I see no
5326 * way around it! Maybe a notify??
5328 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
/* Release the fragment's mbuf chain and the chunk descriptor. */
5331 sctp_m_freem(chk->data);
5334 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5335 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
/*
 * Queue is ordered by SSN within a stream: once past the target
 * SSN nothing further can match (loop exit elided in listing).
 */
5337 * If the stream_seq is > than the purging one, we
5347 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5348 struct sctp_forward_tsn_chunk *fwd,
5349 int *abort_flag, struct mbuf *m, int offset)
5351 /* The pr-sctp fwd tsn */
5353 * here we will perform all the data receiver side steps for
5354 * processing FwdTSN, as required in by pr-sctp draft:
5356 * Assume we get FwdTSN(x):
5358 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5359 * others we have 3) examine and update re-ordering queue on
5360 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5361 * report where we are.
5363 struct sctp_association *asoc;
5364 uint32_t new_cum_tsn, gap;
5365 unsigned int i, fwd_sz, m_size;
5367 struct sctp_stream_in *strm;
5368 struct sctp_tmit_chunk *chk, *nchk;
5369 struct sctp_queued_to_read *ctl, *sv;
5372 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5373 SCTPDBG(SCTP_DEBUG_INDATA1,
5374 "Bad size too small/big fwd-tsn\n");
5377 m_size = (stcb->asoc.mapping_array_size << 3);
5378 /*************************************************************/
5379 /* 1. Here we update local cumTSN and shift the bitmap array */
5380 /*************************************************************/
5381 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5383 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5384 /* Already got there ... */
5388 * now we know the new TSN is more advanced, let's find the actual
5391 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5392 asoc->cumulative_tsn = new_cum_tsn;
5393 if (gap >= m_size) {
5394 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5398 * out of range (of single byte chunks in the rwnd I
5399 * give out). This must be an attacker.
5402 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5403 0, M_DONTWAIT, 1, MT_DATA);
5405 struct sctp_paramhdr *ph;
5408 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5409 (sizeof(uint32_t) * 3);
5410 ph = mtod(oper, struct sctp_paramhdr *);
5411 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5412 ph->param_length = htons(SCTP_BUF_LEN(oper));
5413 ippp = (uint32_t *) (ph + 1);
5414 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5416 *ippp = asoc->highest_tsn_inside_map;
5418 *ippp = new_cum_tsn;
5420 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5421 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5424 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5426 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5427 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5428 asoc->highest_tsn_inside_map = new_cum_tsn;
5430 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5431 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5433 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5434 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5437 SCTP_TCB_LOCK_ASSERT(stcb);
5438 for (i = 0; i <= gap; i++) {
5439 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5440 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5441 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5442 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5443 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5448 /*************************************************************/
5449 /* 2. Clear up re-assembly queue */
5450 /*************************************************************/
5452 * First service it if pd-api is up, just in case we can progress it
5455 if (asoc->fragmented_delivery_inprogress) {
5456 sctp_service_reassembly(stcb, asoc);
5458 /* For each one on here see if we need to toss it */
5460 * For now large messages held on the reasmqueue that are complete
5461 * will be tossed too. We could in theory do more work to spin
5462 * through and stop after dumping one msg aka seeing the start of a
5463 * new msg at the head, and call the delivery function... to see if
5464 * it can be delivered... But for now we just dump everything on the
5467 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5468 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5469 /* It needs to be tossed */
5470 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5471 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5472 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5473 asoc->str_of_pdapi = chk->rec.data.stream_number;
5474 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5475 asoc->fragment_flags = chk->rec.data.rcv_flags;
5477 asoc->size_on_reasm_queue -= chk->send_size;
5478 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5480 /* Clear up any stream problem */
5481 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5482 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5484 * We must dump forward this streams
5485 * sequence number if the chunk is not
5486 * unordered that is being skipped. There is
5487 * a chance that if the peer does not
5488 * include the last fragment in its FWD-TSN
5489 * we WILL have a problem here since you
5490 * would have a partial chunk in queue that
5491 * may not be deliverable. Also if a Partial
5492 * delivery API as started the user may get
5493 * a partial chunk. The next read returning
5494 * a new chunk... really ugly but I see no
5495 * way around it! Maybe a notify??
5497 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5500 sctp_m_freem(chk->data);
5503 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5506 * Ok we have gone beyond the end of the fwd-tsn's
5512 /*******************************************************/
5513 /* 3. Update the PR-stream re-ordering queues and fix */
5514 /* delivery issues as needed. */
5515 /*******************************************************/
5516 fwd_sz -= sizeof(*fwd);
5519 unsigned int num_str;
5520 struct sctp_strseq *stseq, strseqbuf;
5522 offset += sizeof(*fwd);
5524 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5525 num_str = fwd_sz / sizeof(struct sctp_strseq);
5526 for (i = 0; i < num_str; i++) {
5529 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5530 sizeof(struct sctp_strseq),
5531 (uint8_t *) & strseqbuf);
5532 offset += sizeof(struct sctp_strseq);
5533 if (stseq == NULL) {
5537 st = ntohs(stseq->stream);
5539 st = ntohs(stseq->sequence);
5540 stseq->sequence = st;
5545 * Ok we now look for the stream/seq on the read
5546 * queue where its not all delivered. If we find it
5547 * we transmute the read entry into a PDI_ABORTED.
5549 if (stseq->stream >= asoc->streamincnt) {
5550 /* screwed up streams, stop! */
5553 if ((asoc->str_of_pdapi == stseq->stream) &&
5554 (asoc->ssn_of_pdapi == stseq->sequence)) {
5556 * If this is the one we were partially
5557 * delivering now then we no longer are.
5558 * Note this will change with the reassembly
5561 asoc->fragmented_delivery_inprogress = 0;
5563 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5564 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5565 if ((ctl->sinfo_stream == stseq->stream) &&
5566 (ctl->sinfo_ssn == stseq->sequence)) {
5567 str_seq = (stseq->stream << 16) | stseq->sequence;
5569 ctl->pdapi_aborted = 1;
5570 sv = stcb->asoc.control_pdapi;
5571 stcb->asoc.control_pdapi = ctl;
5572 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5574 SCTP_PARTIAL_DELIVERY_ABORTED,
5576 SCTP_SO_NOT_LOCKED);
5577 stcb->asoc.control_pdapi = sv;
5579 } else if ((ctl->sinfo_stream == stseq->stream) &&
5580 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5581 /* We are past our victim SSN */
5585 strm = &asoc->strmin[stseq->stream];
5586 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5587 /* Update the sequence number */
5588 strm->last_sequence_delivered = stseq->sequence;
5590 /* now kick the stream the new way */
5591 /* sa_ignore NO_NULL_CHK */
5592 sctp_kick_prsctp_reorder_queue(stcb, strm);
5594 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5597 * Now slide thing forward.
5599 sctp_slide_mapping_arrays(stcb);
5601 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5602 /* now lets kick out and check for more fragmented delivery */
5603 /* sa_ignore NO_NULL_CHK */
5604 sctp_deliver_reasm_check(stcb, &stcb->asoc);